sigma=FLAGS.SIGMA,
                   batch_size=FLAGS.BATCH_SIZE,
                   eps=FLAGS.EPS,
                   steps=FLAGS.STEPS,
                   kappa=FLAGS.KAPPA,
                   alpha=FLAGS.ALPHA)


if __name__ == '__main__':
    ROOT_PATH = "../GTSRB"
    SAVE_PATH = "../GTSRB/models"
    train_data_dir = os.path.join(ROOT_PATH, "train.p")
    test_data_dir = os.path.join(ROOT_PATH, "test.p")
    model_dir = os.path.join(SAVE_PATH, "LeNet_1")
    model_dir1 = os.path.join(SAVE_PATH, "AlexNet")

    # General flags
    flags.DEFINE_integer('BATCH_SIZE', 128, 'Size of training batches')
    flags.DEFINE_float('MU', 0, 'The mean of the truncated normal distribution')
    flags.DEFINE_float(
        'SIGMA', 0.1,
        'The standard deviation of the truncated normal distribution')
    flags.DEFINE_boolean('ADD_DROPOUT', False,
                         'Whether to apply dropout during training')
    flags.DEFINE_float('DROPOUT', 0.5, 'Dropout value')
    flags.DEFINE_string('ATTACK_TYPE', 'fgsm', 'Select one attack type')
    flags.DEFINE_float('EPS', 0.3, 'The epsilon (input variation parameter)')
    flags.DEFINE_float('KAPPA', 100, 'Carlini attack confidence')
    flags.DEFINE_integer('STEPS', 10, 'Number of iterations')
    flags.DEFINE_float('ALPHA', 0.05, 'Parameter for random noise')
    app.run()
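
A hedged aside, not part of the original snippet: MU and SIGMA presumably seed a truncated-normal weight initializer for the models trained above, along these lines (TF1-style, assuming FLAGS = flags.FLAGS):

import tensorflow as tf

# Hypothetical 5x5 conv kernel drawn from the flag-driven distribution
kernel_init = tf.truncated_normal(shape=(5, 5, 3, 6),
                                  mean=FLAGS.MU, stddev=FLAGS.SIGMA)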
Example #2
    return report


def main(argv=None):
    mnist_tutorial_cw(viz_enabled=FLAGS.viz_enabled,
                      nb_epochs=FLAGS.nb_epochs,
                      batch_size=FLAGS.batch_size,
                      nb_classes=FLAGS.nb_classes,
                      source_samples=FLAGS.source_samples,
                      learning_rate=FLAGS.learning_rate,
                      attack_iterations=FLAGS.attack_iterations,
                      model_path=FLAGS.model_path,
                      targeted=FLAGS.targeted)


if __name__ == '__main__':
    flags.DEFINE_boolean('viz_enabled', True, 'Visualize adversarial ex.')
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
    flags.DEFINE_integer('source_samples', 10, 'Nb of test inputs to attack')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_string('model_path', os.path.join("models", "mnist"),
                        'Path to save or load the model file')
    flags.DEFINE_integer('attack_iterations', 100,
                         'Number of iterations to run attack; 1000 is good')
    flags.DEFINE_boolean('targeted', True,
                         'Run the tutorial in targeted mode?')

    app.run()
Example #3
                     'Batch size to be used while running framework')
flags.DEFINE_integer('num_ensemble_models', 1,
                     'Number of models to use in ensemble for highres-faces')

flags.DEFINE_float(
    'active_ratio', 1.0,
    'Upper cap on ratio of unlabelled examples to be queried for labels')
flags.DEFINE_float('split_ratio', 0.5,
                   'How much of disguised-face data to use for training M2')
flags.DEFINE_float('disparity_ratio', 0.25,
                   'What percentage of data to pick to pass on to oracle')
flags.DEFINE_float(
    'eps', 0.1,
    'Region around equiboundary for even considering querying the oracle')

flags.DEFINE_boolean('augment', False,
                     'Augment data while finetuning covariate-based model?')
flags.DEFINE_boolean('refine_models', False,
                     'Refine previously trained models?')
flags.DEFINE_boolean(
    'blind_strategy', False,
    'If yes, pick all where disparity >= 0.5, otherwise pick according to disparity_ratio'
)

if __name__ == "__main__":
    # Reproducibility
    init()

    # Set resolution according to flag
    GlobalConstants.low_res = (FLAGS.lowRes, FLAGS.lowRes)
    print("== Low resolution : %s ==" % str(GlobalConstants.low_res))
Example #4
                   'If greater than 0 then the gradients would be clipped by '
                   'it.')

flags.DEFINE_bool('sync_replicas', False,
                  'If True will synchronize replicas during training.')

flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of gradients updates before updating params.')

flags.DEFINE_integer('total_num_replicas', 1,
                     'Total number of worker replicas.')

flags.DEFINE_integer('startup_delay_steps', 15,
                     'Number of training steps between replicas startup.')

flags.DEFINE_boolean('reset_train_dir', False,
                     'If true will delete all files in the train_log_dir')

flags.DEFINE_boolean('show_graph_stats', False,
                     'Output model size stats to stderr.')
# yapf: enable

TrainingHParams = collections.namedtuple('TrainingHParams', [
    'learning_rate',
    'optimizer',
    'momentum',
    'use_augment_input',
])


def get_training_hparams():
    # Completed from the namedtuple fields above; the corresponding flag
    # names are an assumption (the original snippet is truncated here).
    return TrainingHParams(learning_rate=FLAGS.learning_rate,
                           optimizer=FLAGS.optimizer,
                           momentum=FLAGS.momentum,
                           use_augment_input=FLAGS.use_augment_input)
Example #5
        feed_dict = {x: adv}
        probabilities = sess.run(preds, feed_dict)
        print(probabilities)

        # Save the adversarial image
        two_d_img = (np.reshape(adv, (28, 28)) * 255).astype(np.uint8)
        from PIL import Image
        save_image = Image.fromarray(two_d_img)
        save_image = save_image.convert('RGB')
        save_image.save(SAVE_PATH)

        # Close TF session
        sess.close()
    return


def main(argv=None):
    mnist_tutorial_cw(nb_classes=FLAGS.nb_classes,
                      attack_iterations=FLAGS.attack_iterations,
                      targeted=FLAGS.targeted)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
    flags.DEFINE_integer('attack_iterations', 100,
                         'Number of iterations to run attack; 1000 is good')
    flags.DEFINE_boolean('targeted', True,
                         'Run the tutorial in targeted mode?')

    tf.app.run()
Example #6
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-direct-third-party-import
from mesh_tensorflow.experimental import input_reader
from mesh_tensorflow.experimental import unet
from tensorflow.contrib import summary as contrib_summary
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import device_assignment
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import flags
from tensorflow.python.tpu.ops import tpu_ops

FLAGS = flags.FLAGS

flags.DEFINE_boolean('use_tpu', True, 'Use TPU or GPU.')
flags.DEFINE_float('lr', 0.003, 'Learning rate.')
flags.DEFINE_float('lr_drop_steps', 20000,
                   'Learning rate drops for every `lr_drop_steps` steps.')
flags.DEFINE_float('lr_drop_rate', 0.3, 'Learning rate drops by this amount.')
flags.DEFINE_integer('num_train_iterations_per_loop', 500,
                     'Number of training iterations per loop.')
flags.DEFINE_integer('num_eval_iterations_per_loop', 2,
                     'Number of eval iterations per loop.')
flags.DEFINE_integer('num_training_loops', 1000, 'Number of training loops.')

flags.DEFINE_string('mesh_shape', 'rows:4, columns:4, cores:2', 'mesh shape')
flags.DEFINE_string('master', '', 'Can be a headless master.')

flags.DEFINE_string('checkpoint_dir', '', 'Path to saved models.')
flags.DEFINE_integer('save_checkpoints_steps', 500,
Example #7
import functools
import os

import mesh_tensorflow as mtf
import numpy as np
import tensorflow as tf  # tf

# pylint: disable=g-direct-tensorflow-import,g-direct-third-party-import
from mesh_tensorflow.experimental import data_aug_lib
from tensorflow.python.platform import flags


FLAGS = flags.FLAGS

flags.DEFINE_boolean('sampled_2d_slices', False,
                     'Whether to build model on 2D CT slices instead of 3D.')

flags.DEFINE_integer('ct_resolution', 128,
                     'Resolution of CT images along depth, height and '
                     'width dimensions.')

flags.DEFINE_integer('n_dataset_read_interleave', 16,
                     'The number of interleave processes.')
flags.DEFINE_integer('n_dataset_processes', 16,
                     'The number of data augmentation processes.')
flags.DEFINE_integer('batch_size_train', 32, 'Training batch size.')
flags.DEFINE_integer('batch_size_eval', 32, 'Evaluation batch size.')
flags.DEFINE_integer('image_nx_block', 8, 'The number of x blocks.')
flags.DEFINE_integer('image_ny_block', 8, 'The number of y blocks.')
flags.DEFINE_integer('image_c', 1,
                     'The number of input image channels. '
Example #8
# How often to save a model checkpoint
SAVE_INTERVAL = 2500

FLAGS = flags.FLAGS

flags.DEFINE_string('trace', "./", 'directory for model checkpoints.')
flags.DEFINE_integer('num_iterations', 300000,
                     'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
                    'filepath of a pretrained model to initialize from.')
flags.DEFINE_string(
    'mode', '',
    'selection from four modes of ["flow", "depth", "depthflow", "stereo"]')
flags.DEFINE_string('train_test', 'train', 'whether to train or test')
flags.DEFINE_boolean("retrain", True, "whether to reset the iteration counter")

flags.DEFINE_string('data_dir', '', 'root filepath of data.')
flags.DEFINE_string('train_file',
                    './filenames/kitti_train_files_png_4frames.txt',
                    'training file')
flags.DEFINE_string('gt_2012_dir', '',
                    'directory of ground truth of kitti 2012')
flags.DEFINE_string('gt_2015_dir', '',
                    'directory of ground truth of kitti 2015')

flags.DEFINE_integer('batch_size', 4, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.0001,
                   'the base learning rate of the generator')
flags.DEFINE_integer('num_gpus', 1, 'the number of gpu to use')
Example #9
                              args=eval_params)
        report.train_adv_train_adv_eval = accuracy

    return report


def main(argv=None):
    from cleverhans_tutorials import check_installation
    check_installation(__file__)

    mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   train_dir=FLAGS.train_dir,
                   filename=FLAGS.filename,
                   load_model=FLAGS.load_model)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
    flags.DEFINE_float('learning_rate', LEARNING_RATE,
                       'Learning rate for training')
    flags.DEFINE_string('train_dir', TRAIN_DIR,
                        'Directory where to save model.')
    flags.DEFINE_string('filename', FILENAME, 'Checkpoint filename.')
    flags.DEFINE_boolean('load_model', LOAD_MODEL,
                         'Load saved model or train.')
    tf.app.run()
Example #10
                   'Learning rate drops for every `lr_drop_steps` steps.')
flags.DEFINE_float('lr_drop_rate', 0.3, 'Learning rate drops by this amount.')
flags.DEFINE_integer('num_train_iterations_per_loop', 500,
                     'Number of training iterations per loop.')
flags.DEFINE_integer('num_eval_iterations_per_loop', 2,
                     'Number of eval iterations per loop.')
flags.DEFINE_integer('num_training_loops', 1000, 'Number of training loops.')

flags.DEFINE_string('mesh_shape', 'rows:4, columns:4, cores:2', 'mesh shape')
flags.DEFINE_string('master', '', 'Can be a headless master.')

flags.DEFINE_string('checkpoint_dir', '', 'Path to saved models.')
flags.DEFINE_integer('save_checkpoints_steps', 500,
                     'Frequency for saving models.')

flags.DEFINE_boolean('write_summary', True, 'Whether to write summary.')
flags.DEFINE_string('summary_dir', '', 'Path to saved summaries.')
flags.DEFINE_string('pred_output_dir', '', 'Path to saved pred results.')


class _CapturedObject(object):
    """A placeholder to capture an object.

  This is useful when we need to capture a Python object in the Tensorflow
  control flow body function and use it outside the control flow.
  """
    def __init__(self):
        self._object = None
        self._captured = False

    def capture(self, o):
        # Body completed for readability (the original snippet is truncated
        # here); standard pattern: capture exactly once.
        assert not self._captured
        self._object = o
        self._captured = True
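
A hedged usage sketch, not from the original source (the class body is truncated above); it assumes the standard pattern of capturing an op built inside a control-flow body function:

captured = _CapturedObject()

def loop_body(i):
    # Stand-in for real graph construction inside the loop body;
    # capture() stashes the Python object for use outside the control flow.
    captured.capture(i * 2)
    return i + 1

# After tf.while_loop(lambda i: i < 10, loop_body, [0]) is constructed,
# the op built inside the body is available via the captured object.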
Example #11
flags.DEFINE_float('beta1', 0.0, 'beta 1')
flags.DEFINE_float('beta2', 0.9, 'beta 2')

# end of MNIST data specification

# general configurations below

flags.DEFINE_string('gpus', '', 'visible GPU list')
flags.DEFINE_string('type', '', 'type of the model, classification or regression')

flags.DEFINE_string('data_dir', '', 'directory of data')
flags.DEFINE_string('output_dir', '', 'directory for model outputs.')
flags.DEFINE_string('checkpoint_dir', '', 'directory of checkpoint files.')

# subject adaptation
flags.DEFINE_boolean('adp', True, 'adaptation or not')

flags.DEFINE_integer('batch_size', 64, 'batch size for training.')

flags.DEFINE_integer('train_batch_size', 0, 'batch size for training.')
flags.DEFINE_integer('xval_batch_size', 0, 'batch size for cross evaluation')
flags.DEFINE_integer('test_batch_size', 0, 'batch size for test')

flags.DEFINE_integer('summary_interval', 5, 'how often to record tensorboard summaries.')
flags.DEFINE_integer('validation_interval', 10, 'how often to run a batch through the validation model')
# flags.DEFINE_integer('save_interval', 2000, 'how often to save a model checkpoint.')

flags.DEFINE_boolean('validation', True, 'whether do validation or not')
flags.DEFINE_boolean('evaluate', True, 'evaluate using validation data to select model parameters')

flags.DEFINE_integer('sample_size', 0, 'sample size for analyzing the model')
Example #12
try:
    from tqdm import tqdm  # optional progress-bar dependency
except ImportError:
    # Fallback no-op (reconstructed context; the snippet starts
    # mid-definition): return the iterable unchanged so
    # `for x in tqdm(data)` still works without a progress bar.
    def tqdm(*args, **kwargs):
        if args:
            return args[0]
        return kwargs.get('iterable', None)


flags.DEFINE_string(
    'data_dir', '~/.keras/datasets/costar_plush_block_stacking_dataset_v0.4/',
    'Directory for collecting the dataset files')

flags.DEFINE_string('glob_filename', '*success*.h5f',
                    'File path to glob for dataset files.')

flags.DEFINE_boolean(
    'ascending', False,
    'Sort in ascending (1 to 100) or descending (100 to 1) order.')

flags.DEFINE_string(
    'save_txt_prefix', 'costar_block_stacking_v0.4_success_only_',
    'Prefix with which to save the sorted output txt file with the train, test and validation sets'
)

flags.DEFINE_string('save_dir', None,
                    'Where to save the txt files, defaults to data_dir')

flags.DEFINE_integer(
    'seed', 0,
    'numpy seed for shuffling the data, so you can generate this list in a repeatable way'
)
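
A hedged sketch, not part of the original snippet: the seed flag presumably drives a repeatable shuffle of the globbed dataset filenames, along these lines:

import glob
import os
import numpy as np

filenames = sorted(glob.glob(os.path.join(os.path.expanduser(FLAGS.data_dir),
                                          FLAGS.glob_filename)))
np.random.seed(FLAGS.seed)
np.random.shuffle(filenames)  # repeatable for a fixed seed and file list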
Example #13
from cleverhans.attacks import FastGradientMethod, BasicIterativeMethod, MomentumIterativeMethod, CarliniWagnerL2
from cleverhans.utils_keras import cnn_model, vgg_model
from cleverhans.utils_tf_multiple_pr_cifar10 import model_train, model_eval, batch_eval, tf_model_load
from cleverhans.utils import set_log_level

FLAGS = flags.FLAGS

flags.DEFINE_string('train_dir', '/home/labiai/Jiacang/Experiments/tmp/pr/cifar', 'Directory storing the saved model.')
flags.DEFINE_string(
    'filename', 'cifar10.ckpt', 'Filename to save model under.')
flags.DEFINE_integer('nb_epochs', 10000, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')

flags.DEFINE_boolean('viz_enabled', False, 'Visualize adversarial ex.')
flags.DEFINE_integer('source_samples', 10, 'Nb of test inputs to attack')
flags.DEFINE_string('model_path', os.path.join("models", "mnist"),
                    'Path to save or load the model file')
flags.DEFINE_integer('attack_iterations', 1000,
                     'Number of iterations to run attack; 1000 is good')
flags.DEFINE_boolean('targeted', False,
                     'Run the tutorial in targeted mode?')

n_input = 32*32*3  # CIFAR-10 data input (img shape: 32*32*3)

numColorOutput = 3
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
type = 'single'
type = 'multiple'  # the later assignment wins; acts as a manual toggle
mode = "nonpr"
Example #14
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for our flags implementation."""
import sys

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS
flags.DEFINE_boolean('myflag', False, '')


def main(argv):
    if len(argv) != 3:
        print("Length of argv was not 3: ", argv)
        sys.exit(-1)

    if argv[1] != "--passthrough":
        print("--passthrough argument not in argv")
        sys.exit(-1)

    if argv[2] != "extra":
        print("'extra' argument not in argv")
        sys.exit(-1)
Example #15
import numpy as np
import tensorflow as tf

from utils import *
from model.rnn import BiRNN

from tensorflow.python.platform import flags
from tensorflow.python.platform import app

flags.DEFINE_string('mode', 'train', 'set whether to train or test')
flags.DEFINE_string('model', 'BiRNN', 'set the model to use, BiRNN, CNN')
flags.DEFINE_string('rnncell', 'lstm',
                    'set the rnncell to use, rnn, gru, lstm...')
flags.DEFINE_integer('num_layer', 3, 'set the layers for rnn')
flags.DEFINE_boolean('layerNormalization', True,
                     'set whether to apply layer normalization to rnn cell')

flags.DEFINE_integer('batch_size', 64, 'set the batch size')
flags.DEFINE_integer('num_hidden', 512, 'set the hidden size of rnn cell')
flags.DEFINE_integer('num_feature', 39, 'set the size of input feature')
flags.DEFINE_integer('num_class', 462, 'set the number of speakers')
flags.DEFINE_integer('num_epoch', 200, 'set the number of epochs')
flags.DEFINE_float('learning_rate', 0.0001, 'set the learning rate')
flags.DEFINE_float('keep_prob', 0.8, 'set probability of dropout')
flags.DEFINE_float(
    'grad_clip', -1,
    'set the threshold of gradient clipping, -1 denotes no clipping')
flags.DEFINE_string('datadir', '../data', 'set the data root directory')
flags.DEFINE_string('logdir', '../log', 'set the log directory')

FLAGS = flags.FLAGS
Example #16
from skimage.io import imread

dir_path = dirname(os.path.realpath(__file__))

# Basic model parameters as external flags.
FLAGS = flags.FLAGS

flags.DEFINE_string('data_set', '/middlebury', 'Data Set in use')

flags.DEFINE_string('grid_params', "{ 'sigma_luma' : 2,'sigma_chroma': 4, 'sigma_spatial': 2}",
                    "Bilateral grid parameters")

flags.DEFINE_string('bs_params', "{'lam': 30, 'A_diag_min': 1e-5, 'cg_tol': 1e-5, 'cg_maxiter': 25}",
                    "Bilateral solver parameters")

flags.DEFINE_boolean('write_flows', False,
                     'Write confidence, .flo and img files')

flags.DEFINE_integer('batchsize', 20, 'Batch size for eval loop.')

flags.DEFINE_integer('eval_interval_secs', 300,
                     'How many seconds between executions of the eval loop.')

flags.DEFINE_integer('testsize', 8,
                     'Number of test samples')

# NOTE: list defaults like these rely on a permissive flags wrapper; absl's
# DEFINE_integer would reject them (DEFINE_multi_integer is the closer fit).
flags.DEFINE_integer('d_shape_img', [388, 584, 3],
                     'Data shape: width, height, channels')

flags.DEFINE_integer('d_shape_flow', [388, 584, 2],
                     'Data shape: width, height, channels')
Example #17
    # Calculating train error
    if testing:
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds_adv, x_train,
                         y_train, args=eval_par)
        report.train_clean_train_adv_eval = acc


    return report


def main(argv=None):
    cifar_tutorial(nb_epochs=FLAGS.nb_epochs,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   train_dir=FLAGS.train_dir,
                   filename=FLAGS.filename,
                   load_model=FLAGS.load_model,
                   method=FLAGS.method)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_epochs', 40, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_string('train_dir', 'cifar_ff_model',
                        'Directory where to save model.')
    flags.DEFINE_string('filename', 'FF_init_model.ckpt', 'Checkpoint filename.')
    flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')
    flags.DEFINE_string('method', 'FGSM', 'Adversarial attack method')
    tf.app.run()
Example #18
will watch each directory. You can also assign names to individual log
directories by putting a colon between the name and the path, as in

tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2
""")

flags.DEFINE_string(
    'host', '0.0.0.0', 'What host to listen to. Defaults to '
    'serving on 0.0.0.0, set to 127.0.0.1 (localhost) to '
    'disable remote access (also quiets security warnings).')

flags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.')

flags.DEFINE_boolean(
    'purge_orphaned_data', True, 'Whether to purge data that '
    'may have been orphaned due to TensorBoard restarts. '
    'Disabling purge_orphaned_data can be used to debug data '
    'disappearance.')

flags.DEFINE_integer('reload_interval', 5, 'How often the backend should load '
                     'more data.')

# Inspect Mode flags

flags.DEFINE_boolean(
    'inspect', False, """Use this flag to print out a digest
of your event files to the command line, when no data is shown on TensorBoard or
the data shown looks weird.

Example usages:
tensorboard --inspect --event_file=myevents.out
Example #19
import keras
from keras import backend as K
import keras_contrib

from grasp_loss import gaussian_kernel_2D
from inception_preprocessing import preprocess_image
import random_crop as rcp

flags.DEFINE_string('data_dir',
                    os.path.join(os.path.expanduser("~"),
                                 '.keras', 'datasets', 'cornell_grasping'),
                    """Path to dataset in TFRecord format
                    (aka Example protobufs) and feature csv files.""")
flags.DEFINE_string('grasp_dataset', 'all', 'TODO(ahundt): integrate with brainrobotdata or allow subsets to be specified')
flags.DEFINE_boolean('grasp_download', False,
                     """Download the grasp_dataset to data_dir if it is not already present.""")
flags.DEFINE_string('train_filename', 'cornell-grasping-dataset-train.tfrecord', 'filename used for the training dataset')
flags.DEFINE_string('evaluate_filename', 'cornell-grasping-dataset-evaluate.tfrecord', 'filename used for the evaluation dataset')
flags.DEFINE_string('test_filename', 'cornell-grasping-dataset-test.tfrecord', 'filename used for the test dataset')
flags.DEFINE_integer('image_size', 224,
                     """DEPRECATED - this doesn't do anything right now. Provide square images of this size.""")
flags.DEFINE_integer('num_preprocess_threads', 12,
                     """Number of preprocessing threads per tower. """
                     """Please make this a multiple of 4.""")
flags.DEFINE_integer('num_readers', 20,
                     """Number of parallel threads reading from the dataset.""")
flags.DEFINE_integer('input_queue_memory_factor', 12,
                     """Size of the queue of preprocessed images. """
                     """Default is ideal but try smaller values, e.g. """
                     """4, 2 or 1, if host memory is constrained. See """
                     """comments in code for more details.""")
Example #20
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-direct-third-party-import
from mesh_tensorflow.experimental import input_reader
from mesh_tensorflow.experimental import unet
from tensorflow.contrib import summary as contrib_summary
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import device_assignment
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import flags
from tensorflow.python.tpu.ops import tpu_ops

FLAGS = flags.FLAGS

flags.DEFINE_boolean('use_tpu', True, 'Use TPU or GPU.')
flags.DEFINE_float('lr', 0.003, 'Learning rate.')
flags.DEFINE_float('lr_drop_steps', 20000,
                   'Learning rate drops for every `lr_drop_steps` steps.')
flags.DEFINE_float('lr_drop_rate', 0.3, 'Learning rate drops by this amount.')
flags.DEFINE_integer('num_train_iterations_per_loop', 500,
                     'Number of training iterations per loop.')
flags.DEFINE_integer('num_eval_iterations_per_loop', 2,
                     'Number of eval iterations per loop.')
flags.DEFINE_integer('num_training_loops', 1000, 'Number of training loops.')

flags.DEFINE_string('mesh_shape', 'rows:4, columns:4, cores:2', 'mesh shape')
flags.DEFINE_string('master', '', 'Can be a headless master.')

flags.DEFINE_string('checkpoint_dir', '', 'Path to saved models.')
flags.DEFINE_integer('save_checkpoints_steps', 500,
Example #21
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags

from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval

from cleverhans.attacks import jsma
from cleverhans.attacks_tf import jacobian_graph
from cleverhans.utils import other_classes, cnn_model, pair_visual, grid_visual

FLAGS = flags.FLAGS

flags.DEFINE_string('train_dir', '/tmp', 'Directory storing the saved model.')
flags.DEFINE_string('filename', 'mnist.ckpt', 'Filename to save model under.')
flags.DEFINE_boolean('viz_enabled', True, 'Enable sample visualization.')
flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_integer('nb_classes', 10, 'Number of classification classes')
flags.DEFINE_integer('img_rows', 28, 'Input row dimension')
flags.DEFINE_integer('img_cols', 28, 'Input column dimension')
flags.DEFINE_integer('nb_channels', 1, 'Nb of color channels in the input.')
flags.DEFINE_integer('nb_filters', 64, 'Number of convolutional filters to use')
flags.DEFINE_integer('nb_pool', 2, 'Size of pooling area for max pooling')
flags.DEFINE_integer('source_samples', 10, 'Nb of test set examples to attack')
flags.DEFINE_float('learning_rate', 0.1, 'Learning rate for training')


def main(argv=None):
    """
    MNIST tutorial for the Jacobian-based saliency map approach (JSMA)
Example #22
from tensorflow.python.platform import flags
from tensorflow.python.training import server_lib

FLAGS = flags.FLAGS

flags.DEFINE_string(
    "cluster_spec", "", """Cluster spec: SPEC.
    SPEC is <JOB>(,<JOB>)*,
    JOB  is <NAME>|<HOST:PORT>(;<HOST:PORT>)*,
    NAME is a valid job name ([a-z][0-9a-z]*),
    HOST is a hostname or IP address,
    PORT is a port number.
E.g., local|localhost:2222;localhost:2223, ps|ps0:2222;ps1:2222""")
flags.DEFINE_string("job_name", "", "Job name: e.g., local")
flags.DEFINE_integer("task_id", 0, "Task index, e.g., 0")
flags.DEFINE_boolean("verbose", False, "Verbose mode")


def parse_cluster_spec(cluster_spec, cluster):
    """Parse content of cluster_spec string and inject info into cluster protobuf.

  Args:
    cluster_spec: cluster specification string, e.g.,
          "local|localhost:2222;localhost:2223"
    cluster: cluster protobuf.

  Raises:
    ValueError: if the cluster_spec string is invalid.
  """

    job_strings = cluster_spec.split(",")
Example #23
def main(argv=None):
    detection(nb_epochs=FLAGS.nb_epochs,
              batch_size=FLAGS.batch_size,
              data_name=FLAGS.dataset,
              attack_type=FLAGS.attack_type)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', 64,
                         'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', 10,
                         'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128,
                         'Size of training batches')
    flags.DEFINE_integer('nb_classes', 10,
                         'Number of classes')
    flags.DEFINE_float('train_fpr', 0.05,
                       'False positive rate used to set the detection threshold')
    flags.DEFINE_string('dataset', 'mnist', 'train dataset name')
    flags.DEFINE_string("attack_type", "fgsm", 'attack to detect')
    flags.DEFINE_string("test_attack_type", "fgsm", 'attack to detect')
    flags.DEFINE_bool("is_train", False, "train online or load from file")
    flags.DEFINE_string("detection_type", "negative", "which detection type to use")
    flags.DEFINE_string('result_folder', "results", 'The output folder for results.')
    flags.DEFINE_bool('use_cache', False, 'use history cache or get adversarial examples online')
    flags.DEFINE_boolean('detection_train_test_mode', True, 'Split into train/test datasets.')
    flags.DEFINE_float('stdevs', 0.05,
                       'L-2 perturbation size is equal to that of the adversarial samples')
    flags.DEFINE_string('similarity_type', "cos", 'similarity index')
    flags.DEFINE_string('label_type', "type1", 'label assignment')
    tf.app.run()
Example #24
from speechvalley.utils import load_batched_data, describe, output_to_sequence, list_dirs, logging, count_params, get_num_classes, check_path_exists, dotdict, activation_functions_dict, optimizer_functions_dict
from speechvalley.models import DBiRNN


from tensorflow.python.platform import flags
from tensorflow.python.platform import app

flags.DEFINE_string('task', 'libri', 'set task name of this program')
flags.DEFINE_string('train_dataset', 'train-clean-100', 'set the training dataset')
flags.DEFINE_string('dev_dataset', 'dev-clean', 'set the development dataset')
flags.DEFINE_string('test_dataset', 'test-clean', 'set the test dataset')

flags.DEFINE_string('mode', 'test', 'set whether to train, dev or test')

flags.DEFINE_boolean('keep', True, 'set whether to restore a model; in test mode, keep should be set to True')
flags.DEFINE_string('level', 'cha', 'set the task level, phn, cha, or seq2seq, seq2seq will be supported soon')
flags.DEFINE_string('model', 'DBiRNN', 'set the model to use, DBiRNN, BiRNN, ResNet..')
flags.DEFINE_string('rnncell', 'lstm', 'set the rnncell to use, rnn, gru, lstm...')
flags.DEFINE_integer('num_layer', 2, 'set the layers for rnn')
flags.DEFINE_string('activation', 'tanh', 'set the activation to use, sigmoid, tanh, relu, elu...')
flags.DEFINE_string('optimizer', 'adam', 'set the optimizer to use, sgd, adam...')
flags.DEFINE_boolean('layerNormalization', False, 'set whether to apply layer normalization to rnn cell')

flags.DEFINE_integer('batch_size', 64, 'set the batch size')
flags.DEFINE_integer('num_hidden', 256, 'set the hidden size of rnn cell')
flags.DEFINE_integer('num_feature', 39, 'set the size of input feature')
flags.DEFINE_integer('num_classes', 30, 'set the number of output classes')
flags.DEFINE_integer('num_epochs', 1, 'set the number of epochs')
flags.DEFINE_float('lr', 0.0001, 'set the learning rate')
flags.DEFINE_float('dropout_prob', 0.1, 'set probability of dropout')
Example #25
flags.DEFINE_string(
    'logdir', None, """logdir specifies the directory where
TensorBoard will look to find TensorFlow event files that it can display.
TensorBoard will recursively walk the directory structure rooted at logdir,
looking for .*tfevents.* files.

You may also pass a comma separated list of log directories, and TensorBoard
will watch each directory. You can also assign names to individual log
directories by putting a colon between the name and the path, as in

tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2
""")

flags.DEFINE_boolean(
    'debug', False, 'Whether to run the app in debug mode. '
    'This increases log verbosity to DEBUG.')

flags.DEFINE_string(
    'host', '0.0.0.0', 'What host to listen to. Defaults to '
    'serving on 0.0.0.0, set to 127.0.0.1 (localhost) to '
    'disable remote access (also quiets security warnings).')

flags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.')

FLAGS = flags.FLAGS

# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 4,
Example #26
    # Close TF session
    sess.close()

    # Finally, block & display a grid of all the adversarial examples
    if viz_enabled:
        import matplotlib.pyplot as plt
        plt.close(figure)
        # _ = grid_visual(grid_viz_data)

    return report


def main(argv=None):
    mnist_tutorial_jsma(viz_enabled=FLAGS.viz_enabled,
                        nb_epochs=FLAGS.nb_epochs,
                        batch_size=FLAGS.batch_size,
                        nb_classes=FLAGS.nb_classes,
                        source_samples=FLAGS.source_samples,
                        learning_rate=FLAGS.learning_rate)


if __name__ == '__main__':
    flags.DEFINE_boolean('viz_enabled', True, 'Visualize adversarial ex.')
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
    flags.DEFINE_integer('source_samples', 10, 'Nb of test inputs to attack')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')

    tf.app.run()
Example #27
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import sys
import unittest

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

flags.DEFINE_string("string_foo", "default_val", "HelpString")
flags.DEFINE_integer("int_foo", 42, "HelpString")
flags.DEFINE_float("float_foo", 42.0, "HelpString")

flags.DEFINE_boolean("bool_foo", True, "HelpString")
flags.DEFINE_boolean("bool_negation", True, "HelpString")
flags.DEFINE_boolean("bool-dash-negation", True, "HelpString")
flags.DEFINE_boolean("bool_a", False, "HelpString")
flags.DEFINE_boolean("bool_c", False, "HelpString")
flags.DEFINE_boolean("bool_d", True, "HelpString")
flags.DEFINE_bool("bool_e", True, "HelpString")

FLAGS = flags.FLAGS


class FlagsTest(unittest.TestCase):
    def testString(self):
        res = FLAGS.string_foo
        self.assertEqual(res, "default_val")
        FLAGS.string_foo = "bar"
Example #28
  return report


def main(argv=None):
  mnist_tutorial_cw(viz_enabled=FLAGS.viz_enabled,
                    nb_epochs=FLAGS.nb_epochs,
                    batch_size=FLAGS.batch_size,
                    source_samples=FLAGS.source_samples,
                    learning_rate=FLAGS.learning_rate,
                    attack_iterations=FLAGS.attack_iterations,
                    model_path=FLAGS.model_path,
                    targeted=FLAGS.targeted)


if __name__ == '__main__':
  flags.DEFINE_boolean('viz_enabled', VIZ_ENABLED,
                       'Visualize adversarial ex.')
  flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                       'Number of epochs to train model')
  flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
  flags.DEFINE_integer('source_samples', SOURCE_SAMPLES,
                       'Number of test inputs to attack')
  flags.DEFINE_float('learning_rate', LEARNING_RATE,
                     'Learning rate for training')
  flags.DEFINE_string('model_path', MODEL_PATH,
                      'Path to save or load the model file')
  flags.DEFINE_integer('attack_iterations', ATTACK_ITERATIONS,
                       'Number of iterations to run attack; 1000 is good')
  flags.DEFINE_boolean('targeted', TARGETED,
                       'Run the tutorial in targeted mode?')

  tf.app.run()
Example #29
flags.DEFINE_integer('nb_labels', 10, 'Number of output classes')

flags.DEFINE_string('data_dir', '/tmp', 'Temporary storage')
flags.DEFINE_string('train_dir', '/tmp/train_dir', 'Where model chkpt are saved')
flags.DEFINE_string('teachers_dir','/tmp/train_dir',
                    'Directory where teachers checkpoints are stored.')

flags.DEFINE_integer('teachers_max_steps', 3000,
                     """Number of steps teachers were ran.""")
flags.DEFINE_integer('max_steps', 3000, """Number of steps to run student.""")
flags.DEFINE_integer('nb_teachers', 10, """Teachers in the ensemble.""")
flags.DEFINE_integer('stdnt_share', 1000,
                     """Student share (last index) of the test data""")
flags.DEFINE_integer('lap_scale', 10,
                     """Scale of the Laplacian noise added for privacy""")
flags.DEFINE_boolean('save_labels', False,
                     """Dump numpy arrays of labels and clean teacher votes""")

flags.DEFINE_boolean('deeper', False, """Activate deeper CNN model""")


def ensemble_preds(dataset, nb_teachers, stdnt_data):
  """
  Given a dataset, a number of teachers, and some input data, this helper
  function queries each teacher for predictions on the data and returns
  all predictions in a single array. (That can then be aggregated into
  one single prediction per input using aggregation.py; cf. function
  prepare_student_data() below.)
  :param dataset: string corresponding to mnist, cifar10, or svhn
  :param nb_teachers: number of teachers (in the ensemble) to learn from
  :param stdnt_data: unlabeled student training data
  :return: 3d array (teacher id, sample id, probability per class)
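  """
  # Hedged sketch of the body (an assumption; the original is truncated in
  # this snippet): query each teacher with a hypothetical softmax_preds()
  # helper and stack the per-teacher probability matrices into the 3d
  # array described above.
  per_teacher = [softmax_preds(dataset, nb_teachers, teacher_id, stdnt_data)
                 for teacher_id in range(nb_teachers)]
  return np.stack(per_teacher)  # (teacher id, sample id, prob per class)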
Example #30
flags.DEFINE_float('meta_lr', 1e-12, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'pretrain_iterations',
    5600,
    'number of pre-training iterations.')
flags.DEFINE_integer(
    'metatrain_iterations',
    1,
    'number of metatraining iterations.')
flags.DEFINE_integer('feed_length', 3, 'length of days to feed')
flags.DEFINE_string(
    'logdir',
    '',
    'directory for summaries and checkpoints.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_boolean('resume', False, 'resume or not')
flags.DEFINE_integer('num_updates', 5, 'number of updates')


def train(model, saver, sess, exp_string, data_generator, resume_itr):
    SUMMARY_INTERVAL = 500
    SAVE_INTERVAL = 1000
    PRINT_INTERVAL = 500
    TEST_PRINT_INTERVAL = PRINT_INTERVAL * 5
    print('Done initializing')
    prelosses, postlosses = [], []
    inputs, labels = data_generator.generate_time_series_batch(train=True)
    for itr in range(
            resume_itr,
            FLAGS.pretrain_iterations +
            FLAGS.metatrain_iterations):