def check_files():
    import os
    from common import paths

    def check_file(filepath):
        if not os.path.exists(filepath):
            print('File %s not found.' % filepath)

    check_file(paths.database_file())
    check_file(paths.images_file())
    check_file(paths.theta_file())
    check_file(paths.codes_file())
    check_file(paths.test_images_file())
    check_file(paths.train_images_file())
    check_file(paths.test_theta_file())
    check_file(paths.train_theta_file())
    check_file(paths.test_codes_file())
    check_file(paths.train_codes_file())
    check_file(paths.emnist_test_images_file())
    check_file(paths.emnist_train_images_file())
    check_file(paths.emnist_test_labels_file())
    check_file(paths.emnist_train_labels_file())
    check_file(paths.fashion_test_images_file())
    check_file(paths.fashion_train_images_file())
    check_file(paths.fashion_test_labels_file())
    check_file(paths.fashion_train_labels_file())
    check_file(paths.celeba_test_images_file())
    check_file(paths.celeba_train_images_file())
    check_file(paths.celeba_test_labels_file())
    check_file(paths.celeba_train_labels_file())
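# A minimal entry-point sketch, assuming check_files() lives in a standalone
# script; the __main__ guard below is an illustrative assumption, not taken
# from the repository.
if __name__ == '__main__':
    check_files()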
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Create HDF5 file of rendered font letters and digits.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing prototype images.', type=str)
    parser.add_argument('-codes_file', default=paths.codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-theta_file', default=paths.theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-batch_size', default=32, help='Batch size.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    parser.add_argument('-images_file', default=paths.images_file(), help='HDF5 file containing transformed images.', type=str)
    parser.set_defaults(use_gpu=True)

    return parser
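# A usage sketch showing how these parsers are typically consumed, assuming
# get_parser() is called on the instance that defines it; the `tool` argument
# and main() wrapper are illustrative assumptions, not taken from the repository.
def main(tool):
    args = tool.get_parser().parse_args()
    # -no_gpu flips the use_gpu default installed via parser.set_defaults(use_gpu=True).
    print('database file:', args.database_file)
    print('batch size:', args.batch_size)
    print('use GPU:', args.use_gpu)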
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Attack decoder and classifier.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-classifier_file', default=paths.state_file('classifier'), help='Snapshot state file of classifier.', type=str)
    parser.add_argument('-accuracy_file', default=paths.results_file('decoder/accuracy'), help='Correctly classified test samples of classifier.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('decoder/success'), help='HDF5 file containing success indicators.', type=str)
    parser.add_argument('-log_file', default=paths.log_file('decoder/attacks'), help='Log file.', type=str)
    parser.add_argument('-attack', default='UntargetedBatchL2ClippedGradientDescent', help='Attack to try.', type=str)
    parser.add_argument('-objective', default='UntargetedF6', help='Objective to use.', type=str)
    parser.add_argument('-max_attempts', default=1, help='Maximum number of attempts per attack.', type=int)
    parser.add_argument('-max_samples', default=20*128, help='How many samples from the test set to attack.', type=int)
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
    parser.add_argument('-epsilon', default=0.1, help='Epsilon allowed for attacks.', type=float)
    parser.add_argument('-c_0', default=0., help='Weight of norm.', type=float)
    parser.add_argument('-c_1', default=0.1, help='Weight of bound, if not enforced through clipping or reparameterization.', type=float)
    parser.add_argument('-c_2', default=0.5, help='Weight of objective.', type=float)
    parser.add_argument('-max_iterations', default=100, help='Number of iterations for attack.', type=int)
    parser.add_argument('-max_projections', default=5, help='Number of projections for alternating projection.', type=int)
    parser.add_argument('-base_lr', default=0.005, help='Learning rate for attack.', type=float)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    parser.add_argument('-no_label_leaking', default=False, dest='no_label_leaking', action='store_true')
    parser.add_argument('-on_manifold', default=False, dest='on_manifold', action='store_true')
    parser.add_argument('-initialize_zero', default=False, action='store_true', help='Initialize attack at zero.')

    # Some network parameters.
    parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
    parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Attack decoder and classifier.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file for thetas.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-perturbation_images_file', default=paths.results_file('decoder/perturbation_images'), help='HDF5 file for perturbation images.', type=str)
    parser.add_argument('-log_file', default=paths.log_file('decoder/attacks'), help='Log file.', type=str)
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')

    return parser
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Inspect transformed images.')
    parser.add_argument('-database_file', default=paths.database_file(), type=str)
    parser.add_argument('-codes_file', default=paths.codes_file(), type=str)
    parser.add_argument('-theta_file', default=paths.theta_file(), type=str)
    parser.add_argument('-images_file', default=paths.images_file(), type=str)
    parser.add_argument('-train_codes_file', default=paths.train_codes_file(), type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), type=str)
    parser.add_argument('-train_theta_file', default=paths.train_theta_file(), type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), type=str)
    parser.add_argument('-train_images_file', default=paths.train_images_file(), type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), type=str)

    return parser
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Detect attacks on classifier.')
    parser.add_argument('-mode', default='svd', help='Mode.', type=str)
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('classifier/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('classifier/success'), help='HDF5 file containing success indicators.', type=str)
    parser.add_argument('-accuracy_file', default=paths.results_file('classifier/accuracy'), help='HDF5 file containing accuracy indicators.', type=str)
    parser.add_argument('-batch_size', default=64, help='Batch size.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    parser.add_argument('-pre_pca', default=20, help='PCA dimensionality reduction before NN.', type=int)
    parser.add_argument('-n_nearest_neighbors', default=50, help='Number of NNs to consider.', type=int)
    parser.add_argument('-n_pca', default=10, help='Number of PCA components.', type=int)
    parser.add_argument('-n_fit', default=100000, help='Training images to fit.', type=int)
    parser.add_argument('-plot_directory', default=paths.experiment_dir('classifier/detection'), help='Plot directory.', type=str)
    parser.add_argument('-max_samples', default=1000, help='Number of samples.', type=int)

    # Some decoder parameters.
    parser.add_argument('-decoder_files', default=paths.state_file('decoder'), help='Decoder files.', type=str)
    parser.add_argument('-latent_space_size', default=10, help='Size of latent space.', type=int)
    parser.add_argument('-decoder_architecture', default='standard', help='Architecture to use.', type=str)
    parser.add_argument('-decoder_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-decoder_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-decoder_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-decoder_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
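# A minimal sketch of what the detection parameters above might configure,
# assuming scikit-learn is available: PCA dimensionality reduction before a
# nearest-neighbor index over training images. The helper name and the exact
# detection logic are illustrative assumptions, not taken from the repository.
from sklearn.decomposition import PCA
from sklearn.neighbors import NearestNeighbors

def fit_detector(train_images, args):
    # Flatten images and limit the fit set to -n_fit samples.
    data = train_images.reshape(train_images.shape[0], -1)[:args.n_fit]
    # Reduce dimensionality to -pre_pca components before the NN search.
    pca = PCA(n_components=args.pre_pca).fit(data)
    # Index the projected training images for -n_nearest_neighbors queries.
    nn = NearestNeighbors(n_neighbors=args.n_nearest_neighbors).fit(pca.transform(data))
    return pca, nn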
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Create HDF5 file of rendered font letters and digits.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file created.', type=str)
    parser.add_argument('-size', default=24, help='Width and height of images.', type=int)
    parser.add_argument('-characters', default='ABCDEFGHIJ', help='Characters to generate prototypes for.')

    return parser
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Visualize attacks on decoder and classifier.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing images.', type=str)
    parser.add_argument('-label_index', default=2, help='Label index.', type=int)
    parser.add_argument('-classifier_file', default=paths.state_file('classifier'), help='Snapshot state file of classifier.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('decoder/success'), help='HDF5 file containing success indicators.', type=str)
    parser.add_argument('-accuracy_file', default=paths.results_file('decoder/accuracy'), help='Correctly classified test samples of classifier.', type=str)
    parser.add_argument('-output_directory', default=paths.experiment_dir('decoder/perturbations'), help='Directory to store visualizations.', type=str)
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')

    # Some network parameters.
    parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
    parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Test attacks on classifier.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-train_theta_file', default=paths.train_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing labels.', type=str)
    parser.add_argument('-accuracy_file', default=paths.results_file('classifier/accuracy'), help='Correctly classified test samples of classifier.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('decoder/success'), help='HDF5 file indicating attack success.', type=str)
    parser.add_argument('-plot_directory', default=paths.experiment_dir('decoder'), help='Directory for plots, e.g., the success rate plot.', type=str)
    parser.add_argument('-results_file', default='', help='Path to pickled results file.', type=str)
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
    parser.add_argument('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')

    return parser
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Train classifier.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-train_codes_file', default=paths.train_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-train_theta_file', default=paths.train_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-label_index', default=2, help='Column index in label file.', type=int)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-state_file', default=paths.state_file('robust_manifold_classifier'), help='Snapshot state file.', type=str)
    parser.add_argument('-log_file', default=paths.log_file('robust_manifold_classifier'), help='Log file.', type=str)
    parser.add_argument('-training_file', default=paths.results_file('robust_manifold_training'), help='Training statistics file.', type=str)
    parser.add_argument('-testing_file', default=paths.results_file('robust_manifold_testing'), help='Testing statistics file.', type=str)
    parser.add_argument('-loss_file', default=paths.image_file('loss'), help='Loss plot file.', type=str)
    parser.add_argument('-error_file', default=paths.image_file('error'), help='Error plot file.', type=str)
    parser.add_argument('-success_file', default=paths.image_file('robust_manifold_success'), help='Success rate plot file.', type=str)
    parser.add_argument('-gradient_file', default='', help='Gradient plot file.', type=str)
    parser.add_argument('-random_samples', default=False, action='store_true', help='Randomize the subsampling of the training set.')
    parser.add_argument('-training_samples', default=-1, help='Number of samples used for training.', type=int)
    parser.add_argument('-test_samples', default=-1, help='Number of samples for testing.', type=int)
    parser.add_argument('-validation_samples', default=0, help='Number of samples for validation.', type=int)
    parser.add_argument('-early_stopping', default=False, action='store_true', help='Use early stopping.')
    parser.add_argument('-attack_samples', default=1000, help='Samples to attack.', type=int)
    parser.add_argument('-batch_size', default=64, help='Batch size.', type=int)
    parser.add_argument('-epochs', default=10, help='Number of epochs.', type=int)
    parser.add_argument('-weight_decay', default=0.0001, help='Weight decay importance.', type=float)
    parser.add_argument('-logit_decay', default=0, help='Logit decay importance.', type=float)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    parser.add_argument('-skip', default=5, help='Verbosity in iterations.', type=int)
    parser.add_argument('-lr', default=0.005, type=float, help='Base learning rate.')
    parser.add_argument('-lr_decay', default=0.9, type=float, help='Learning rate decay.')
    parser.add_argument('-results_file', default='', help='Results file for evaluation.', type=str)
    parser.add_argument('-debug_directory', default='', help='Debug directory.', type=str)
    # Some network parameters.
    parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
    parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    # Attack parameters.
    parser.add_argument('-attack', default='UntargetedBatchL2ClippedGradientDescent', help='Attack to try.', type=str)
    parser.add_argument('-objective', default='UntargetedF6', help='Objective to use.', type=str)
    parser.add_argument('-epsilon', default=1, help='Epsilon allowed for attacks.', type=float)
    parser.add_argument('-c_0', default=0., help='Weight of norm.', type=float)
    parser.add_argument('-c_1', default=0.1, help='Weight of bound, if not enforced through clipping or reparameterization.', type=float)
    parser.add_argument('-c_2', default=0.5, help='Weight of objective.', type=float)
    parser.add_argument('-max_iterations', default=10, help='Number of iterations for attack.', type=int)
    parser.add_argument('-max_projections', default=5, help='Number of projections for alternating projection.', type=int)
    parser.add_argument('-base_lr', default=0.005, help='Learning rate for attack.', type=float)
    parser.add_argument('-verbose', action='store_true', default=False, help='Verbose attacks.')
    parser.add_argument('-anneal_epochs', default=0, help='Anneal iterations in the first epochs.', type=int)

    # Variants.
    # Note: the literal percent sign is escaped as %% because argparse applies
    # %-formatting to help strings.
    parser.add_argument('-full_variant', default=False, action='store_true', help='100%% variant.')
    parser.add_argument('-training_mode', default=False, action='store_true', help='Training mode variant for attack.')

    return parser
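# A small sketch of how the comma-separated -network_units string might be
# turned into per-layer unit counts for the MLP; the helper name below is an
# illustrative assumption, not taken from the repository.
def parse_units(units_argument):
    # '1024,1024,1024,1024' -> [1024, 1024, 1024, 1024]
    return [int(units) for units in units_argument.split(',') if units]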
def get_parser(self):
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Generate latent codes.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file created.', type=str)
    parser.add_argument('-codes_file', default=paths.codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-theta_file', default=paths.theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-number_transformations', default=2, help='Number of transformations, applied in this order: scale, rotation, translation in x, translation in y, shear in x and shear in y.', type=int)
    parser.add_argument('-min_scale', default=0.9, help='Minimum scale (relative, with 1 being original scale).', type=float)
    parser.add_argument('-max_scale', default=1.1, help='Maximum scale (relative, with 1 being original scale).', type=float)
    parser.add_argument('-min_rotation', default=-math.pi / 4, help='Minimum rotation (e.g., -math.pi).', type=float)
    parser.add_argument('-max_rotation', default=math.pi / 4, help='Maximum rotation (e.g., math.pi).', type=float)
    parser.add_argument('-min_translation', default=-0.2, help='Minimum translation in both x and y (relative to size).', type=float)
    parser.add_argument('-max_translation', default=0.2, help='Maximum translation in both x and y (relative to size).', type=float)
    parser.add_argument('-min_shear', default=-0.5, help='Minimum shear (relative to size).', type=float)
    parser.add_argument('-max_shear', default=0.5, help='Maximum shear (relative to size).', type=float)
    parser.add_argument('-min_color', default=0.5, help='Minimum color value, maximum is 1.', type=float)
    parser.add_argument('-multiplier', default=1000, help='How many times to multiply each font/letter.', type=int)

    return parser
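# A sketch of how the min/max ranges above could be sampled into a theta vector
# of transformation parameters, assuming numpy and the ordering listed in the
# -number_transformations help text; the helper below is illustrative only and
# not taken from the repository.
import numpy

def sample_theta(args):
    return numpy.array([
        numpy.random.uniform(args.min_scale, args.max_scale),              # scale
        numpy.random.uniform(args.min_rotation, args.max_rotation),        # rotation
        numpy.random.uniform(args.min_translation, args.max_translation),  # translation in x
        numpy.random.uniform(args.min_translation, args.max_translation),  # translation in y
        numpy.random.uniform(args.min_shear, args.max_shear),              # shear in x
        numpy.random.uniform(args.min_shear, args.max_shear),              # shear in y
    ])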