def get_parser(self):
    """
    Get parser.

    Builds the argument parser for testing attacks on the classifier
    through the learned decoder (dataset/result file paths, batching,
    and the number of transformations the decoder models).

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Test attacks on classifier.')
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing labels.', type=str)
    parser.add_argument('-label_index', default=2, help='Column index in label file.', type=int)
    parser.add_argument('-accuracy_file', default=paths.results_file('learned_decoder/accuracy'), help='Correctly classified test samples of classifier.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('learned_decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('learned_decoder/success'), help='HDF5 file indicating attack success.', type=str)
    parser.add_argument('-plot_directory', default=paths.experiment_dir('learned_decoder'), help='Path to PNG plot file for success rate.', type=str)
    parser.add_argument('-results_file', default='', help='Path to pickled results file.', type=str)
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
    parser.add_argument('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')

    # Network.
    # Typo fix: help text previously read "Numer of transformations.".
    parser.add_argument('-N_theta', default=6, help='Number of transformations.', type=int)

    return parser
def get_parser(self):
    """
    Get parser.

    Builds the argument parser for detecting attacks on the classifier:
    input/result files, nearest-neighbor and PCA detection parameters,
    and the decoder configuration used for on-manifold detection.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Detect attacks on classifier.')
    parser.add_argument('-mode', default='svd', help='Mode.', type=str)
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file codes dataset.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('classifier/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('classifier/success'), help='HDF5 file containing success indicators.', type=str)
    parser.add_argument('-accuracy_file', default=paths.results_file('classifier/accuracy'), help='HDF5 file containing accuracy indicators.', type=str)
    parser.add_argument('-batch_size', default=64, help='Batch size.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    # Typo fix: help previously read "ebfore NN".
    parser.add_argument('-pre_pca', default=20, help='PCA dimensionality reduction before NN.', type=int)
    parser.add_argument('-n_nearest_neighbors', default=50, help='Number of NNs to consider.', type=int)
    # Help previously copy-pasted from -n_nearest_neighbors; this option controls
    # the number of PCA components, not nearest neighbors.
    parser.add_argument('-n_pca', default=10, help='Number of PCA components to consider.', type=int)
    parser.add_argument('-n_fit', default=100000, help='Training images to fit.', type=int)
    parser.add_argument('-plot_directory', default=paths.experiment_dir('classifier/detection'), help='Plot directory.', type=str)
    parser.add_argument('-max_samples', default=1000, help='Number of samples.', type=int)

    # Some decoder parameters.
    parser.add_argument('-decoder_files', default=paths.state_file('decoder'), help='Decoder files.', type=str)
    parser.add_argument('-latent_space_size', default=10, help='Size of latent space.', type=int)
    parser.add_argument('-decoder_architecture', default='standard', help='Architecture to use.', type=str)
    parser.add_argument('-decoder_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-decoder_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-decoder_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-decoder_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
def get_parser(self):
    """
    Get parser.

    Assembles the command line options for testing attacks on the
    classifier via a learned decoder, including theta/decoder state
    files and the decoder network configuration.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    argument_parser = argparse.ArgumentParser(description='Test attacks on classifier.')
    add = argument_parser.add_argument

    # Dataset, theta and decoder inputs.
    add('-test_images_file', default=paths.test_images_file(), type=str, help='HDF5 file containing dataset.')
    add('-train_images_file', default=paths.train_images_file(), type=str, help='HDF5 file containing dataset.')
    add('-test_theta_file', default=paths.results_file('test_theta'), type=str, help='HDF5 file containing dataset.')
    add('-train_theta_file', default=paths.results_file('train_theta'), type=str, help='HDF5 file containing dataset.')
    add('-test_codes_file', default=paths.test_codes_file(), type=str, help='HDF5 file containing labels.')
    add('-decoder_files', default=paths.state_file('decoder'), type=str, help='Decoder files.')
    add('-label_index', default=2, type=int, help='Column index in label file.')

    # Result and output locations.
    add('-accuracy_file', default=paths.results_file('learned_decoder/accuracy'), type=str, help='Correctly classified test samples of classifier.')
    add('-perturbations_file', default=paths.results_file('learned_decoder/perturbations'), type=str, help='HDF5 file containing perturbations.')
    add('-success_file', default=paths.results_file('learned_decoder/success'), type=str, help='HDF5 file indicating attack success.')
    add('-plot_directory', default=paths.experiment_dir('learned_decoder'), type=str, help='Path to PNG plot file for success rate.')
    add('-results_file', default='', type=str, help='Path to pickled results file.')

    # Evaluation behavior.
    add('-batch_size', default=128, type=int, help='Batch size of attack.')
    add('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
    add('-no_gpu', dest='use_gpu', action='store_false')
    add('-bound', default=2, type=float, help='Bound to consider for samples in latent space.')

    # Some decoder parameters.
    add('-latent_space_size', default=10, type=int, help='Size of latent space.')
    add('-decoder_architecture', default='standard', type=str, help='Architecture to use.')
    add('-decoder_activation', default='relu', type=str, help='Activation function to use.')
    add('-decoder_no_batch_normalization', default=False, action='store_true', help='Do not use batch normalization.')
    add('-decoder_channels', default=16, type=int, help='Channels of first convolutional layer, afterwards channels are doubled.')
    add('-decoder_dropout', default=False, action='store_true', help='Whether to use dropout.')
    add('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return argument_parser
def get_parser(self):
    """
    Get parser.

    Builds the argument parser for testing attacks directly on the
    classifier: classifier snapshot, dataset files, attack results
    and the classifier's network configuration.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Test attacks on classifier.')
    parser.add_argument('-classifier_file', default=paths.state_file('classifier'), help='Snapshot state file of classifier.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-label_index', default=2, help='Column index in label file.', type=int)
    parser.add_argument('-perturbations_file', default=paths.results_file('classifier/perturbations'), help='HDF5 file containing perturbations.', type=str)
    parser.add_argument('-success_file', default=paths.results_file('classifier/success'), help='HDF5 file indicating attack success.', type=str)
    # Consistency fix: type=str was missing here while every sibling string
    # option declares it; argparse behavior is unchanged (values stay str).
    parser.add_argument('-probabilities_file', default=paths.results_file('classifier/probabilities'), help='HDF5 file containing attack probabilities.', type=str)
    parser.add_argument('-results_file', default='', help='Path to pickled results file.', type=str)
    parser.add_argument('-plot_directory', default=paths.experiment_dir('classifier'), help='Path to PNG plot file for success rate.', type=str)
    parser.add_argument('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)

    # Some network parameters.
    parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
    parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
def train(self, base_directory, method, label):
    """
    Train VAE-GAN.

    Trains an auto-encoder (VAE-GAN variant or plain VAE, selected by
    `method`) for the given class `label` if no snapshot exists yet,
    then evaluates it and — when a display is available — renders
    mosaic/individual visualizations of random samples,
    reconstructions, interpolations and original images.

    Side effects: writes model/result files under `base_directory` and
    stores (reconstruction_error, code_mean, code_var) into
    `self.results[base_directory]`.
    """

    # Derive all state/result/log file locations from the base directory.
    encoder_file = paths.state_file('%s/encoder' % base_directory)
    decoder_file = paths.state_file('%s/decoder' % base_directory)
    classifier_file = paths.state_file('%s/classifier' % base_directory)
    reconstruction_file = paths.results_file('%s/reconstructions' % base_directory)
    interpolation_file = paths.results_file('%s/interpolation' % base_directory)
    random_file = paths.results_file('%s/random' % base_directory)
    log_file = paths.log_file('%s/vaegan' % base_directory)
    results_file = paths.pickle_file('%s/results' % base_directory)
    training_file = paths.results_file('%s/training' % base_directory)
    testing_file = paths.results_file('%s/testing' % base_directory)
    error_file = paths.image_file('%s/error' % base_directory)
    gradient_file = paths.image_file('%s/gradient' % base_directory)
    test_results_file = paths.pickle_file('%s/test_results' % base_directory)

    # Only train when no snapshot exists; an existing encoder AND decoder
    # snapshot means training was already done.
    if not os.path.exists(encoder_file) or not os.path.exists(decoder_file):
        # Any method containing 'vaegan' takes the GAN branch.
        if method.find('vaegan') >= 0:
            arguments = [
                '-train_images_file=%s' % self.train_images_file,
                '-train_codes_file=%s' % self.train_codes_file,
                '-test_images_file=%s' % self.test_images_file,
                '-test_codes_file=%s' % self.test_codes_file,
                '-label_index=%d' % self.label_index,
                '-label=%d' % label,
                '-encoder_file=%s' % encoder_file,
                '-decoder_file=%s' % decoder_file,
                '-classifier_file=%s' % classifier_file,
                '-reconstruction_file=%s' % reconstruction_file,
                '-interpolation_file=%s' % interpolation_file,
                '-random_file=%s' % random_file,
                '-log_file=%s' % log_file,
                '-results_file=%s' % results_file,
                '-training_file=%s' % training_file,
                '-testing_file=%s' % testing_file,
                '-error_file=%s' % error_file,
                '-gradient_file=%s' % gradient_file,
                '-latent_space_size=%d' % self.args.latent_space_size,
                '-beta=%g' % self.args.beta,
                '-gamma=%g' % self.args.gamma,
                '-epochs=%d' % self.args.epochs,
                '-eta=%g' % self.args.eta,
            ] + self.training_parameters + self.network_parameters + self.classifier_parameters
            log(arguments)
            # NOTE: local 'train' deliberately shadows this method's name.
            if method == 'vaegan2':
                train = TrainVAEGAN2(arguments)
            else:
                # Only the 'vaegan2' variant is implemented.
                raise NotImplementedError()
            train.main()
        elif method == 'vae':
            # Plain VAE: no classifier/gradient files, no gamma/eta weights.
            arguments = [
                '-train_images_file=%s' % self.train_images_file,
                '-train_codes_file=%s' % self.train_codes_file,
                '-test_images_file=%s' % self.test_images_file,
                '-test_codes_file=%s' % self.test_codes_file,
                '-label_index=%d' % self.label_index,
                '-label=%d' % label,
                '-encoder_file=%s' % encoder_file,
                '-decoder_file=%s' % decoder_file,
                '-reconstruction_file=%s' % reconstruction_file,
                '-interpolation_file=%s' % interpolation_file,
                '-random_file=%s' % random_file,
                '-log_file=%s' % log_file,
                '-results_file=%s' % results_file,
                '-training_file=%s' % training_file,
                '-testing_file=%s' % testing_file,
                '-error_file=%s' % error_file,
                '-latent_space_size=%d' % self.args.latent_space_size,
                '-beta=%g' % self.args.beta,
                '-epochs=%d' % self.args.epochs,
            ] + self.training_parameters + self.network_parameters
            train = TrainVariationalAutoEncoder(arguments)
            train.main()
        else:
            raise NotImplementedError()

    train_theta_file = paths.results_file('%s/train_theta' % base_directory)
    test_theta_file = paths.results_file('%s/test_theta' % base_directory)

    # Evaluate (or re-evaluate) the trained auto-encoder; otherwise reuse
    # the pickled results from a previous run.
    if self.args.reevaluate or not os.path.exists(test_results_file):
        test = TestVariationalAutoEncoder([
            '-train_images_file=%s' % self.train_images_file,
            '-test_images_file=%s' % self.test_images_file,
            '-train_codes_file=%s' % self.train_codes_file,
            '-test_codes_file=%s' % self.test_codes_file,
            '-train_theta_file=%s' % train_theta_file,
            '-test_theta_file=%s' % test_theta_file,
            '-label_index=%d' % self.label_index,
            '-label=%d' % label,
            '-encoder_file=%s' % encoder_file,
            '-decoder_file=%s' % decoder_file,
            '-batch_size=256',
            '-results_file=%s' % test_results_file,
            # Empty values disable the corresponding outputs.
            '-reconstruction_file=',
            '-random_file=',
            '-interpolation_file=',
            '-output_directory=',
            '-latent_space_size=%d' % self.args.latent_space_size,
        ] + self.network_parameters)
        test.main()
        results = test.results
    else:
        results = utils.read_pickle(test_results_file)

    self.results[base_directory] = (results['reconstruction_error'], results['code_mean'], results['code_var'])

    # Visualizations below only run when a display is available.
    # Random samples: mosaic (guarded) and individual images (unguarded —
    # the reevaluate guard is deliberately commented out).
    if utils.display():
        random_directory = paths.experiment_dir('%s/random/' % base_directory, experiment=self.experiment())
        if self.args.reevaluate or not os.path.exists(random_directory):
            visualize_mosaic = VisualizeMosaic([
                '-images_file=%s' % random_file,
                '-output_directory=%s' % random_directory,
            ])
            visualize_mosaic.main()
        random_directory = paths.experiment_dir('%s/random_/' % base_directory, experiment=self.experiment())
        # if self.args.reevaluate or not os.path.exists(random_directory):
        visualize_individual = VisualizeIndividual([
            '-images_file=%s' % random_file,
            '-output_directory=%s' % random_directory,
        ])
        visualize_individual.main()

    # Reconstructions.
    if utils.display():
        reconstruction_directory = paths.experiment_dir('%s/reconstruction/' % base_directory, experiment=self.experiment())
        if self.args.reevaluate or not os.path.exists(reconstruction_directory):
            visualize_mosaic = VisualizeMosaic([
                '-images_file=%s' % reconstruction_file,
                '-output_directory=%s' % reconstruction_directory,
            ])
            visualize_mosaic.main()
        reconstruction_directory = paths.experiment_dir('%s/reconstruction_/' % base_directory, experiment=self.experiment())
        # if self.args.reevaluate or not os.path.exists(reconstruction_directory):
        visualize_individual = VisualizeIndividual([
            '-images_file=%s' % reconstruction_file,
            '-output_directory=%s' % reconstruction_directory,
        ])
        visualize_individual.main()

    # Interpolations.
    if utils.display():
        interpolation_directory = paths.experiment_dir('%s/interpolation/' % base_directory, experiment=self.experiment())
        if self.args.reevaluate or not os.path.exists(interpolation_directory):
            visualize_mosaic = VisualizeMosaic([
                '-images_file=%s' % interpolation_file,
                '-output_directory=%s' % interpolation_directory,
            ])
            visualize_mosaic.main()
        interpolation_directory = paths.experiment_dir('%s/interpolation_/' % base_directory, experiment=self.experiment())
        # if self.args.reevaluate or not os.path.exists(reconstruction_directory):
        visualize_individual = VisualizeIndividual([
            '-images_file=%s' % interpolation_file,
            '-output_directory=%s' % interpolation_directory,
        ])
        visualize_individual.main()

    # Original test images of this label, for side-by-side comparison.
    if utils.display():
        original_directory = paths.experiment_dir('%s/original/' % base_directory, experiment=self.experiment())
        if self.args.reevaluate or not os.path.exists(original_directory):
            visualize_mosaic = VisualizeMosaic([
                '-images_file=%s' % self.test_images_file,
                '-codes_file=%s' % self.test_codes_file,
                '-label_index=%d' % self.label_index,
                '-label=%d' % label,
                '-output_directory=%s' % original_directory,
            ])
            visualize_mosaic.main()
        original_directory = paths.experiment_dir('%s/original_/' % base_directory, experiment=self.experiment())
        # if self.args.reevaluate or not os.path.exists(reconstruction_directory):
        visualize_individual = VisualizeIndividual([
            '-images_file=%s' % self.test_images_file,
            '-codes_file=%s' % self.test_codes_file,
            '-label_index=%d' % self.label_index,
            '-label=%d' % label,
            '-output_directory=%s' % original_directory,
        ])
        visualize_individual.main()
def get_parser(self):
    """
    Get parser.

    Builds the argument parser for visualizing attacks on the decoder
    and classifier: input dataset/theta files, attack result files,
    the visualization output directory, and network configuration.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Visualize attacks on decoder and classifier.')
    parser.add_argument('-database_file', default=paths.database_file(), help='HDF5 file containing font prototype images.', type=str)
    parser.add_argument('-test_theta_file', default=paths.test_theta_file(), help='HDF5 file containing transformations.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing images.', type=str)
    parser.add_argument('-label_index', default=2, help='Label index.', type=int)
    parser.add_argument('-classifier_file', default=paths.state_file('classifier'), help='Snapshot state file of classifier.', type=str)
    parser.add_argument('-perturbations_file', default=paths.results_file('decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
    # Help previously copy-pasted from -perturbations_file; this file holds
    # success indicators (wording matches the sibling parsers).
    parser.add_argument('-success_file', default=paths.results_file('decoder/success'), help='HDF5 file indicating attack success.', type=str)
    parser.add_argument('-accuracy_file', default=paths.results_file('decoder/accuracy'), help='Correctly classified test samples of classifier.', type=str)
    parser.add_argument('-output_directory', default=paths.experiment_dir('decoder/perturbations'), help='Directory to store visualizations.', type=str)
    parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')

    # Some network parameters.
    parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
    parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
def get_parser(self):
    """
    Get parser.

    Defines the command line options for testing attacks on the
    classifier using precomputed theta files; includes a switch for
    computing latent-space statistics.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    argument_parser = argparse.ArgumentParser(description='Test attacks on classifier.')
    add = argument_parser.add_argument

    # Dataset and theta inputs.
    add('-test_images_file', default=paths.test_images_file(), type=str, help='HDF5 file containing dataset.')
    add('-train_images_file', default=paths.train_images_file(), type=str, help='HDF5 file containing dataset.')
    add('-test_theta_file', default=paths.test_theta_file(), type=str, help='HDF5 file containing dataset.')
    add('-train_theta_file', default=paths.train_theta_file(), type=str, help='HDF5 file containing dataset.')
    add('-test_codes_file', default=paths.test_codes_file(), type=str, help='HDF5 file containing labels.')
    add('-label_index', default=2, type=int, help='Column index in label file.')

    # Attack result files and outputs.
    add('-accuracy_file', default=paths.results_file('classifier/accuracy'), type=str, help='Correctly classified test samples of classifier.')
    add('-perturbations_file', default=paths.results_file('classifier/perturbations'), type=str, help='HDF5 file containing perturbations.')
    add('-success_file', default=paths.results_file('classifier/success'), type=str, help='HDF5 file indicating attack success.')
    add('-results_file', default='', type=str, help='Path to pickled results file.')
    add('-plot_directory', default=paths.experiment_dir('classifier'), type=str, help='Path to PNG plot file for success rate.')

    # Flags.
    add('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
    add('-latent', default=False, action='store_true', help='Latent statistics.')

    return argument_parser
def get_parser(self):
    """
    Get parser.

    Builds the argument parser for attacking the decoder and classifier
    with parametrized transformations (translation, shear, scale,
    rotation, color) plus the classifier's network configuration.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser(description='Attack decoder and classifier.')
    parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
    parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing codes.', type=str)
    parser.add_argument('-label_index', default=2, help='Label index.', type=int)
    parser.add_argument('-classifier_file', default=paths.state_file('classifier'), help='Snapshot state file of classifier.', type=str)
    parser.add_argument('-output_directory', default=paths.experiment_dir('output'), help='Output directory.', type=str)
    parser.add_argument('-log_file', default=paths.log_file('learned_decoder/attacks'), help='Log file.', type=str)
    parser.add_argument('-objective', default='UntargetedF0', help='Objective to use.', type=str)
    parser.add_argument('-max_samples', default=10, help='How many samples from the test set to attack.', type=int)
    parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
    # Typo fix: help text previously read "Numer of transformations.".
    parser.add_argument('-N_theta', default=6, help='Number of transformations.', type=int)
    parser.add_argument('-translation_x', default='-0.1,0.1', type=str, help='Minimum and maximum translation in x.')
    # Consistency fix: trailing period was missing in this help string.
    parser.add_argument('-translation_y', default='-0.1,0.1', type=str, help='Minimum and maximum translation in y.')
    parser.add_argument('-shear_x', default='-0.25,0.25', type=str, help='Minimum and maximum shear in x.')
    parser.add_argument('-shear_y', default='-0.25,0.25', type=str, help='Minimum and maximum shear in y.')
    parser.add_argument('-scale', default='0.95,1.05', type=str, help='Minimum and maximum scale.')
    parser.add_argument('-rotation', default='%g,%g' % (-math.pi / 4, math.pi / 4), type=str, help='Minimum and maximum rotation.')
    parser.add_argument('-color', default=0.5, help='Minimum color value, maximum is 1.', type=float)

    # Some network parameters.
    parser.add_argument('-network_architecture', default='standard', help='Classifier architecture to use.', type=str)
    parser.add_argument('-network_activation', default='relu', help='Activation function to use.', type=str)
    parser.add_argument('-network_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
    parser.add_argument('-network_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
    parser.add_argument('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    parser.add_argument('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return parser
def get_parser(self):
    """
    Get parser.

    Defines the command line interface for running gradient-based
    attacks against the decoder and classifier, covering attack/objective
    selection, optimization hyper-parameters, and the network and
    decoder architecture options.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    argument_parser = argparse.ArgumentParser(description='Attack decoder and classifier.')
    add = argument_parser.add_argument

    # Inputs and model state.
    add('-test_images_file', default=paths.test_images_file(), type=str, help='HDF5 file containing dataset.')
    add('-test_codes_file', default=paths.test_codes_file(), type=str, help='HDF5 file containing codes.')
    add('-test_theta_file', default=paths.results_file('test_theta'), type=str, help='HDF5 file containing theta.')
    add('-label_index', default=2, type=int, help='Label index.')
    add('-classifier_file', default=paths.state_file('classifier'), type=str, help='Snapshot state file of classifier.')
    add('-decoder_files', default=paths.state_file('decoder'), type=str, help='Decoder state file.')
    add('-output_directory', default=paths.experiment_dir('output'), type=str, help='Output directory.')
    add('-log_file', default=paths.log_file('learned_decoder/attacks'), type=str, help='Log file.')

    # Attack selection and optimization hyper-parameters.
    add('-attack', default='UntargetedBatchL2ClippedGradientDescent', type=str, help='Attack to try.')
    add('-objective', default='UntargetedF6', type=str, help='Objective to use.')
    add('-max_attempts', default=1, type=int, help='Maximum number of attempts per attack.')
    add('-max_samples', default=10, type=int, help='How many samples from the test set to attack.')
    add('-epsilon', default=0.1, type=float, help='Epsilon allowed for attacks.')
    add('-c_0', default=0., type=float, help='Weight of norm.')
    add('-c_1', default=0.1, type=float, help='Weight of bound, if not enforced through clipping or reparameterization.')
    add('-c_2', default=0.5, type=float, help='Weight of objective.')
    add('-max_iterations', default=100, type=int, help='Number of iterations for attack.')
    add('-max_projections', default=5, type=int, help='Number of projections for alternating projection.')
    add('-base_lr', default=0.005, type=float, help='Learning rate for attack.')
    add('-no_gpu', dest='use_gpu', action='store_false')
    add('-on_manifold', dest='on_manifold', action='store_true')

    # Some network parameters.
    add('-network_architecture', default='standard', type=str, help='Classifier architecture to use.')
    add('-network_activation', default='relu', type=str, help='Activation function to use.')
    add('-network_no_batch_normalization', default=False, action='store_true', help='Do not use batch normalization.')
    add('-network_channels', default=16, type=int, help='Channels of first convolutional layer, afterwards channels are doubled.')
    add('-network_dropout', default=False, action='store_true', help='Whether to use dropout.')
    add('-network_units', default='1024,1024,1024,1024', help='Units for MLP.')

    # Some decoder parameters.
    add('-latent_space_size', default=10, type=int, help='Size of latent space.')
    add('-decoder_architecture', default='standard', type=str, help='Architecture to use.')
    add('-decoder_activation', default='relu', type=str, help='Activation function to use.')
    add('-decoder_no_batch_normalization', default=False, action='store_true', help='Do not use batch normalization.')
    add('-decoder_channels', default=16, type=int, help='Channels of first convolutional layer, afterwards channels are doubled.')
    add('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')

    return argument_parser