Example #1
from consumers.margin_loss import MarginLoss
from consumers.task_embedding import TaskEmbedding
from data.data_sequencer import DataSequencer
from data.generator import Generator
from networks.cnn import CNN
from trainers.il_trainer import ILTrainer
from trainers.pipeline import Pipeline
from trainers.summary_writer import SummaryWriter
import os
from networks.save_load import Saver, Loader
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS

# Dataset/method options
flags.DEFINE_string('dataset', 'sim_reach', 'One of sim_reach, sim_push.')

# Training Options
flags.DEFINE_integer('iterations', 500000,
                     'The number of training iterations.')
flags.DEFINE_integer(
    'batch_size', 64,
    'The number of tasks sampled per batch (aka batch size).')
flags.DEFINE_float('lr', 0.0001, 'The learning rate.')
flags.DEFINE_integer('support', 5,
                     'The number of support examples per task (aka k-shot).')
flags.DEFINE_integer('query', 5, 'The number of query examples per task.')
flags.DEFINE_integer('embedding', 20, 'The embedding size.')

# Model Options
flags.DEFINE_string('activation', 'relu', 'One of relu, elu, or leaky_relu.')
flags.DEFINE_bool('max_pool', False, 'Use max pool rather than strides.')
flags.DEFINE_list('filters', [32, 64],
                  'List of filters per convolution layer.')
flags.DEFINE_list('kernels', [3, 3],
Example #2
import keras
from keras import backend

import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags

from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval, batch_eval
from cleverhans.attacks import fgsm
from cleverhans.utils import cnn_model

FLAGS = flags.FLAGS

flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
flags.DEFINE_float('learning_rate', 0.1, 'Learning rate for training')


def main(argv=None):
    """
    MNIST cleverhans tutorial
    :return:
    """

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    if not hasattr(backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")
Example #3
            feature_dict['feature' + str(i + 1)] = tf.FixedLenFeature([],
                                                                      tf.int64)
        else:
            feature_dict['feature' + str(i + 1)] = tf.FixedLenFeature(
                [], tf.string)
    features = tf.parse_single_example(serialized_example,
                                       features=feature_dict)
    return features


#======================================================================================
## test code
flags.DEFINE_string("scale", "big", "specify your dataset scale")
flags.DEFINE_string("logdir", "/home/pony/github/data/inputpipeline/big",
                    "specify the location to store log or model")
flags.DEFINE_integer("samples_num", 80, "specify your total number of samples")
flags.DEFINE_integer("time_length", 2, "specify max time length of sample")
flags.DEFINE_integer("feature_size", 2, "specify feature size of sample")
flags.DEFINE_integer("num_epochs", 100, "specify number of training epochs")
flags.DEFINE_integer("batch_size", 2, "specify batch size when training")
flags.DEFINE_integer("num_classes", 10, "specify number of output classes")
FLAGS = flags.FLAGS

if __name__ == '__main__':
    scale = FLAGS.scale
    logdir = FLAGS.logdir
    sn = FLAGS.samples_num
    tl = FLAGS.time_length
    fs = FLAGS.feature_size
    num_epochs = FLAGS.num_epochs
    batch_size = FLAGS.batch_size
Example #4
    mnist_blackbox(nb_classes=FLAGS.nb_classes,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   nb_epochs=FLAGS.nb_epochs,
                   holdout=FLAGS.holdout,
                   data_aug=FLAGS.data_aug,
                   nb_epochs_s=FLAGS.nb_epochs_s,
                   lmbda=FLAGS.lmbda,
                   aug_batch_size=FLAGS.data_aug_batch_size,
                   preprocess=FLAGS.preprocess)


if __name__ == '__main__':

    # General flags
    flags.DEFINE_integer('nb_classes', NB_CLASSES,
                         'Number of classes in problem')
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
    flags.DEFINE_float('learning_rate', LEARNING_RATE,
                       'Learning rate for training')

    # Flags related to oracle
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')

    # Flags related to substitute
    flags.DEFINE_integer('holdout', HOLDOUT, 'Test set holdout for adversary')
    flags.DEFINE_integer('data_aug', DATA_AUG,
                         'Number of substitute data augmentations')
    flags.DEFINE_integer('nb_epochs_s', NB_EPOCHS_S,
                         'Training epochs for substitute')
    flags.DEFINE_float('lmbda', LMBDA, 'Lambda from arxiv.org/abs/1602.02697')
Example #5
from EmoEstimator.utils.evaluate import print_summary
from data_generator import DataGenerator
from maml import MAML
from tensorflow.python.platform import flags
from datetime import datetime
import os

start_time = datetime.now()
FLAGS = flags.FLAGS

## Dataset/method options
flags.DEFINE_string('datasource', 'disfa',
                    'sinusoid or omniglot or miniimagenet')
flags.DEFINE_integer(
    'num_classes', 2,
    'number of classes used in classification (e.g. 5-way classification).')
# oracle means task id is input (only suitable for sinusoid)
flags.DEFINE_string('baseline', None, 'oracle, or None')

## Training options
flags.DEFINE_integer('pretrain_iterations', 0,
                     'number of pre-training iterations.')
flags.DEFINE_integer(
    'metatrain_iterations', 100,
    'number of metatraining iterations.')  # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 1,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'update_batch_size', 5,
Example #6
        t1 = time.time()
        acc = model_eval(sess,
                         x_image,
                         y,
                         preds_adv,
                         X_test,
                         Y_test,
                         args=eval_par)
        t2 = time.time()
        print("Took", t2 - t1, "seconds")
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)


if __name__ == '__main__':

    dirs = ['models', 'adv_trained']
    if "MNIST_CHALLENGE_DIR" in os.environ:
        dirs.insert(0, os.environ['MNIST_CHALLENGE_DIR'])
    default_checkpoint_dir = os.path.join(*dirs)

    flags.DEFINE_integer('batch_size', 128, "batch size")
    flags.DEFINE_float('label_smooth', 0.1,
                       ("Amount to subtract from correct label "
                        "and distribute among other labels"))
    flags.DEFINE_string('attack_type', 'fgsm',
                        ("Attack type: 'fgsm'->fast gradient sign "
                         "method, 'bim'->'basic iterative method'"))
    flags.DEFINE_string('checkpoint_dir', default_checkpoint_dir,
                        'Checkpoint directory to load')
    app.run(main)
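These tutorial scripts share an entry-point pattern: flags are registered at module level (or under `if __name__ == '__main__':`), and `app.run(main)` parses `sys.argv` into `FLAGS` before invoking `main`. A minimal, self-contained sketch of that pattern (the flag name and script shape are illustrative, not taken from the example above):

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
FLAGS = flags.FLAGS


def main(argv=None):
    # Flag values are only safe to read here, after app.run has parsed argv.
    print('batch size:', FLAGS.batch_size)


if __name__ == '__main__':
    app.run(main)  # parses e.g. --batch_size=256 from argv, then calls main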
Example #7
def getFlag(model_name):
    use_time = False
    if use_time:
        exp_name = datetime.datetime.now().strftime("%I:%M%p-%Y-%B-%d")
    else:
        exp_name = 'test'

    FLAGS = flags.FLAGS
    # Dataset Options:
    flags.DEFINE_integer('batch_size', 8, 'Size of a batch')

    # Base Model class Mandatory:
    flags.DEFINE_bool('train', True, 'whether to train or test')
    flags.DEFINE_bool('verbose', True,
                      'whether to show print information or not')
    flags.DEFINE_integer('epoch', 30, 'Number of Epochs to train on')
    flags.DEFINE_string('exp', exp_name, 'name of experiments')
    flags.DEFINE_integer('log_interval', 1, 'log outputs every so many epoch')
    flags.DEFINE_integer('val_interval', 3, 'validate every so many epoch')
    flags.DEFINE_integer(
        'patience', 3,
        'number of non-improving validation iterations before early stop')
    flags.DEFINE_integer('save_interval', 10,
                         'save outputs every so many iterations')
    ## Saver load or options:
    flags.DEFINE_integer('max_to_keep', 10, 'maximum number of models to keep')
    flags.DEFINE_integer('keep_checkpoint_every_n_hours', 3,
                         'check point intervals')
    flags.DEFINE_integer(
        'resume_iter', -1,
        'iteration to resume training from, -1 means not resuming')
    flags.DEFINE_string('ckptdir', global_macros.CKPT_ROOT + "/" + model_name,
                        'location where models will be stored')
    flags.DEFINE_string('logdir', global_macros.LOGGER_ROOT + "/" + model_name,
                        'location where log of experiments will be stored')
    ## Plot option:
    flags.DEFINE_bool('plot', True, 'plot after training')
    flags.DEFINE_bool('crop', False, 'crop regions')
    flags.DEFINE_bool('crop_stack', True, 'crop stack/ random crop')

    # learning rate
    flags.DEFINE_bool('L1_loss', False, 'Use L1 or L2 loss')
    flags.DEFINE_bool('weight_decay', False, 'Turn on weight decay or not')
    flags.DEFINE_float('lr', 1e-4, 'Learning rate for training')
    flags.DEFINE_float('lr_decay_val', 10, 'Learning rate decay ratio')
    flags.DEFINE_bool('recompute', False, 'use recomputation')

    # Model specific:
    flags.DEFINE_bool('temp_only', False,
                      'only use temperature channel or not')
    flags.DEFINE_bool('ssim', False, 'use ssim loss or not')

    # Unet specific:
    flags.DEFINE_bool('is_pad', True, 'Use padding for convolution or not')
    flags.DEFINE_integer('nfilters', 8, 'The number of base filters for unet')
    flags.DEFINE_integer('unet_levels', 3, 'Levels of Unet')
    flags.DEFINE_bool('img_emb', False, 'Use image embedding or not')

    # LCN specific:
    flags.DEFINE_list('lcn_kernel', [1, 3, 3], 'Kernel list for lcn model')
    flags.DEFINE_bool('regularize', False, 'Turn on regularizer for LCN')
    flags.DEFINE_float('alpha', 1e5, 'Regularizer value')

    # tile conv LCN
    flags.DEFINE_bool('use_LCN', False,
                      'use LCN as the last layer, tile conv LCN only')
    return FLAGS
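Note that flags.DEFINE_* writes into a process-wide registry, so calling getFlag twice in the same process (common in tests or notebooks) raises a DuplicateFlagError. A hedged workaround, assuming the absl-backed FLAGS object used by TensorFlow 1.5+ (which supports membership tests):

def define_once(define_fn, name, *args, **kwargs):
    # Skip re-definition if the flag already exists in the global registry.
    if name not in flags.FLAGS:
        define_fn(name, *args, **kwargs)

# e.g. inside getFlag:
#   define_once(flags.DEFINE_integer, 'batch_size', 8, 'Size of a batch')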
Example #8
import random
import tensorflow as tf

from data_generator import DataGenerator
from maml import MAML
from tensorflow.python.platform import flags
from tensorflow.python import debug as tf_debug
from tqdm import tqdm
import os

FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

## Dataset/method options
flags.DEFINE_string('datasource', 'omniglot', 'omniglot or mnist or miniimagenet or celeba')
flags.DEFINE_integer('num_encoding_dims', -1, 'number of encoding dimensions of the unsupervised representation learning method')
flags.DEFINE_string('encoder', 'acai', 'acai or bigan or deepcluster or infogan')

## Training options
flags.DEFINE_integer('metatrain_iterations', 30000, 'number of metatraining iterations.')
flags.DEFINE_integer('meta_batch_size', 8, 'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_float('update_lr', 0.05, 'step size alpha for inner gradient update.')

flags.DEFINE_integer('inner_update_batch_size_train', 1,
                     'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_integer('inner_update_batch_size_val', 5, 'above but for meta-val')
flags.DEFINE_integer('outer_update_batch_size', 5, 'number of examples used for outer gradient update')
flags.DEFINE_integer('num_classes_train', 5, 'number of classes used in classification for meta-training')
flags.DEFINE_integer('num_classes_val', 5, 'number of classes used in classification for meta-validation.')
Example #9
def define():
    """Define common flags."""
    # yapf: disable
    flags.DEFINE_integer('batch_size', 32,
                         'Batch size.')

    flags.DEFINE_integer('crop_width', None,
                         'Width of the central crop for images.')

    flags.DEFINE_integer('crop_height', None,
                         'Height of the central crop for images.')

    flags.DEFINE_string('train_log_dir', '/home/OCR/aocr-logs',
                        'Directory where to write event logs.')

    flags.DEFINE_string('dataset_name', 'number_plates',
                        'Name of the dataset. Supported: fsns')

    flags.DEFINE_string('split_name', 'train',
                        'Dataset split name to run evaluation for: test,train.')

    flags.DEFINE_string('dataset_dir', None,
                        'Dataset root folder.')

    flags.DEFINE_string('checkpoint', '/home/OCR/aocr-logs/model.ckpt-3000',
                        'Path for checkpoint to restore weights from.')

    flags.DEFINE_string('master',
                        '',
                        'BNS name of the TensorFlow master to use.')

    # Model hyper parameters
    flags.DEFINE_float('learning_rate', 0.004,
                       'learning rate')

    flags.DEFINE_string('optimizer', 'momentum',
                        'the optimizer to use')

    flags.DEFINE_float('momentum', 0.9,
                        'momentum value for the momentum optimizer if used')

    flags.DEFINE_bool('use_augment_input', True,
                      'If True will use image augmentation')

    # Method hyper parameters
    # conv_tower_fn
    flags.DEFINE_string('final_endpoint', 'Mixed_5d',
                        'Endpoint to cut inception tower')

    # sequence_logit_fn
    flags.DEFINE_bool('use_attention', True,
                      'If True will use the attention mechanism')

    flags.DEFINE_bool('use_autoregression', True,
                      'If True will use autoregression (a feedback link)')

    flags.DEFINE_integer('num_lstm_units', 256,
                         'number of LSTM units for sequence LSTM')

    flags.DEFINE_float('weight_decay', 0.00004,
                       'weight decay for char prediction FC layers')

    flags.DEFINE_float('lstm_state_clip_value', 10.0,
                       'cell state is clipped by this value prior to the cell'
                       ' output activation')

    # 'sequence_loss_fn'
    flags.DEFINE_float('label_smoothing', 0.1,
                       'weight for label smoothing')

    flags.DEFINE_bool('ignore_nulls', True,
                      'ignore null characters for computing the loss')

    flags.DEFINE_bool('average_across_timesteps', False,
                      'divide the returned cost by the total label weight')
Example #10
def main(argv=None):
    mnist_blackbox(nb_classes=FLAGS.nb_classes,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   nb_epochs=FLAGS.nb_epochs,
                   holdout=FLAGS.holdout,
                   data_aug=FLAGS.data_aug,
                   nb_epochs_s=FLAGS.nb_epochs_s,
                   lmbda=FLAGS.lmbda,
                   aug_batch_size=FLAGS.data_aug_batch_size)


if __name__ == '__main__':
    # General flags
    flags.DEFINE_integer('nb_classes', 10, 'Number of classes in problem')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')

    # Flags related to oracle
    flags.DEFINE_integer('nb_epochs', 10, 'Number of epochs to train model')

    # Flags related to substitute
    flags.DEFINE_integer('holdout', 150, 'Test set holdout for adversary')
    flags.DEFINE_integer('data_aug', 6, 'Nb of substitute data augmentations')
    flags.DEFINE_integer('nb_epochs_s', 10, 'Training epochs for substitute')
    flags.DEFINE_float('lmbda', 0.1, 'Lambda from arxiv.org/abs/1602.02697')
    flags.DEFINE_integer('data_aug_batch_size', 512,
                         'Batch size for augmentation')

    tf.app.run()
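Every default above can be overridden from the command line, since tf.app.run() parses argv before calling main; flags can also be set programmatically, which is handy in tests. Both usages below are illustrative (the script filename is an assumption):

# Command line:
#   python mnist_blackbox.py --nb_epochs=20 --lmbda=0.2 --data_aug_batch_size=256
#
# Programmatic override before tf.app.run():
FLAGS = flags.FLAGS
FLAGS.nb_epochs = 20  # replaces the DEFINE_integer default declared above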
Example #11
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import deep_cnn
import input
import metrics

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

flags.DEFINE_string('dataset', 'svhn', 'The name of the dataset to use')
flags.DEFINE_integer('nb_labels', 10, 'Number of output classes')

flags.DEFINE_string('data_dir', '/tmp', 'Temporary storage')
flags.DEFINE_string('train_dir', '/tmp/train_dir',
                    'Where model ckpt are saved')

flags.DEFINE_integer('max_steps', 3000, """Number of training steps to run.""")
flags.DEFINE_integer('nb_teachers', 50, """Teachers in the ensemble.""")
flags.DEFINE_integer('teacher_id', 0, """ID of teacher being trained.""")

flags.DEFINE_boolean('deeper', False, """Activate deeper CNN model""")

FLAGS = flags.FLAGS


def train_teacher(dataset, nb_teachers, teacher_id):
Example #12
import os
import sys

import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from data_generator import ImageDataGenerator
from maml import MASF

FLAGS = flags.FLAGS

try:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(sys.argv[1])
except IndexError:
    print('No GPU given... setting to 0')
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'

## Dataset PACS
flags.DEFINE_integer('num_classes', 800,
                     'number of classes used in classification.')

## Training options
flags.DEFINE_integer('train_iterations', 100000,
                     'number of training iterations.')
flags.DEFINE_integer('meta_batch_size', 64,
                     'number of images sampled per source domain')
flags.DEFINE_float('inner_lr', 0.001,
                   'step size alpha for inner gradient update on meta-train')
flags.DEFINE_float(
    'outer_lr', 0.001,
    'learning rate for outer updates with (task-loss + meta-loss)')
flags.DEFINE_float(
    'metric_lr', 0.001,
    'learning rate for the metric embedding nn with AdamOptimizer')
flags.DEFINE_float('margin', 10, 'distance margin in metric loss')
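The `margin` flag above parameterizes a distance-margin metric loss. A generic sketch of such a hinge on embedding distances (a standard triplet-style loss, not necessarily MASF's exact formulation):

def metric_margin_loss(anchor, positive, negative, margin=10.0):
    # Hinge on squared Euclidean distances: keep positives closer to the
    # anchor than negatives by at least `margin`.
    d_pos = tf.reduce_sum(tf.square(anchor - positive), axis=1)
    d_neg = tf.reduce_sum(tf.square(anchor - negative), axis=1)
    return tf.reduce_mean(tf.maximum(0.0, margin + d_pos - d_neg))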
Example #13
import tensorflow as tf
from tensorflow.python.platform import flags
from time import time

flags.DEFINE_integer('max_steps', 100, 'max number of steps to run')
flags.DEFINE_integer('warmup', 10, 'max number of steps to warmup')
flags.DEFINE_integer('batch_size', 1000, 'batch size')

FLAGS = flags.FLAGS


def main(_):
    image_name = '../tensorflow/ElCapitan_256_by_256.jpg'
    file_list = [image_name]

    print("Running multi-threaded JPEG decode in TensorFlow. Please wait.\n")

    sess = tf.Session('', config=tf.ConfigProto())

    batch_size = FLAGS.batch_size

    filename_queue = tf.FIFOQueue(-1, tf.string)
    filename_enq_ops = []
    for i in range(batch_size):
        filename_enq_op = filename_queue.enqueue(image_name)
        filename_enq_ops.append(filename_enq_op)
    filename_enq_loop = tf.group(*filename_enq_ops)

    image_queue = tf.FIFOQueue(-1, tf.uint8)
    enq_ops = []
    for i in range(batch_size):
Example #14
# print('rebuild')#rebuild
# datasets='mnist'
# models=['vgg11', 'vgg13', 'vgg16', 'vgg19', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
# 	     'resnet152', 'googlenet12', 'googlenet16', 'googlenet22']
# attacks=['fgsm']
# mu_vars=['gf']
# for model in ['lenet5']:
#     for mu_var in mu_vars:
#         for attack in attacks:
#             tf.reset_default_graph()
#             samples_path='../adv_result/mu_'+datasets+'/'+mu_var+'/'+attack+'/'+model+'/train_data'
#             model_training(datasets=datasets, model_name = model, samples_path=samples_path,
#                              nb_epochs=50, batch_size=128, learning_rate=0.001, attack=attack, mu=True, mu_var=mu_var)


def main(argv=None):
    print('rebuild')  # rebuild

    tf.reset_default_graph()
    samples_path = ('../adv_result/mu_' + FLAGS.datasets + '/' + FLAGS.mu_var +
                    '/' + FLAGS.attack + '/' + FLAGS.model_name + '/train_data')
    model_training(datasets=FLAGS.datasets, model_name=FLAGS.model_name,
                   samples_path=samples_path, nb_epochs=FLAGS.epochs,
                   batch_size=128, learning_rate=0.001, attack=FLAGS.attack,
                   mu=True, mu_var=FLAGS.mu_var)

if __name__ == '__main__':
    flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')
    flags.DEFINE_string('attack', 'fgsm', 'attack_method')  # '../mt_result/mnist_jsma/adv_jsma'
    flags.DEFINE_string('model_name', 'lenet1', 'model_name')
    flags.DEFINE_string('mu_var', 'gf', '')
    flags.DEFINE_integer('epochs', 50, '')

    tf.app.run()
Example #15
   Author :       charlesXu
   date:          2018/12/28
-------------------------------------------------
   Change Activity: 2018/12/28:
-------------------------------------------------
"""
import os
import tensorflow as tf

from tensorflow.python.platform import flags
from model import rnn_model
from poems import process_poems, generate_batch

data_path = r'F:\project\Chatbot_CN\Chatbot_Data\Text_generator\poem\poems.txt'

flags.DEFINE_integer('batch_size', 64, 'batch size.')
flags.DEFINE_float('learning_rate', 0.01, 'learning rate.')
flags.DEFINE_string('model_dir', os.path.abspath('./model'),
                    'model save path.')
flags.DEFINE_string('file_path', os.path.abspath(data_path),
                    'file name of poems.')
flags.DEFINE_string('model_prefix', 'poems', 'model save prefix.')
flags.DEFINE_integer('epochs', 50, 'train how many epochs.')

# FLAGS = tf.app.flags.FLAGS
FLAGS = flags.FLAGS


def run_training():
    if not os.path.exists(FLAGS.model_dir):
        os.makedirs(FLAGS.model_dir)
Example #16
""" Utility functions. """
import numpy as np
import os
import random
import tensorflow as tf
import warnings

from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
from tensorflow.contrib.framework import sort

FLAGS = flags.FLAGS
flags.DEFINE_integer('spec_iter', 1,
                     'Number of iterations to normalize spectrum of matrix')
flags.DEFINE_float('spec_norm_val', 1.0, 'Desired norm of matrices')
flags.DEFINE_bool('downsample', False,
                  'Whether to do average pool downsampling')
flags.DEFINE_bool('spec_eval', False,
                  'Set to true to prevent spectral updates')


def get_median(v):
    v = tf.reshape(v, [-1])
    m = tf.shape(v)[0] // 2
    return tf.nn.top_k(v, m).values[m - 1]
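
# A rough numeric check of get_median (hypothetical usage; assumes a TF1
# session). For an n-element tensor this returns the (n//2)-th largest
# value -- the upper median for even n, one above the median for odd n:
#
#   with tf.Session() as sess:
#       print(sess.run(get_median(tf.constant([3., 1., 4., 1., 5.]))))  # 4.0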


def set_seed(seed):
    import torch
    import numpy
    import random
Example #17
from tf_utils.keras_utils import *
from tf_utils.make_models import *
from tf_utils.tf_utils import *
from utils.utils import *

FLAGS = flags.FLAGS

BATCH_SIZE = 32
STEPS_PER_EPOCH = 50000 // BATCH_SIZE

# NOTE: the train_dir path needs the leading './'
flags.DEFINE_string('train_dir', './cifar10_basic/',
                    'Directory storing the saved model.')
flags.DEFINE_string('filename', 'model.ckpt', 'Filename to save model under.')
#flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches.')
# Must divide evenly into the dataset sizes. (FIX this later)
#for batch_size = 100, is 600 times nb_epochs
#STEPS
flags.DEFINE_integer('max_steps', 100 * STEPS_PER_EPOCH,
                     'Number of steps to run trainer.')
flags.DEFINE_integer('print_steps', STEPS_PER_EPOCH, 'Print progress every...')
flags.DEFINE_integer('eval_steps', STEPS_PER_EPOCH, 'Run evaluation every...')
flags.DEFINE_integer('save_steps', STEPS_PER_EPOCH, 'Save model every...')
flags.DEFINE_integer('summary_steps', 2 * STEPS_PER_EPOCH,
                     'Run summary every...')
flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate for training')
flags.DEFINE_integer('verbosity', 1, 'How chatty')
flags.DEFINE_float('label_smooth', 0,
                   'How much to clip y values (0 for no clipping)')
flags.DEFINE_float('epsilon', 0.3, 'Strength of attack')
Example #18
'''
Holds parameter definitions for model.
'''
from tensorflow.python.platform import flags
import json
import math

FLAGS = flags.FLAGS
flags.DEFINE_integer("attack_epochs", 1, \
                    "how many iterations to run the attack for")
flags.DEFINE_integer("attack_batch_size", 1, "batch size to use in the attack")
flags.DEFINE_integer('image_height', 299, \
                        'Height of each input images.')
flags.DEFINE_integer('image_width', 299, \
                        'Width of each input images.')
flags.DEFINE_integer("image_channels", 3, \
                        "Number of channels in input image")
flags.DEFINE_string('noise_initial', '', \
    'Specifies the initialization of the noise \
    Options: zeros, random_normal'                                  )
flags.DEFINE_float("noise_init_mean", 0.0, \
                    "when random normally initializing the noise, use this as mean")
flags.DEFINE_float("noise_init_stddev", 1.0, \
                    "when random normally initializing the noise, use this as stddev")
flags.DEFINE_integer('num_classes', 1001, \
                        'The number of classes this network is trained on')
flags.DEFINE_float('pixel_high', 1.0, \
    'The maximum pixel value in this setting')
flags.DEFINE_float('pixel_low', 0.0, \
    'The minimum pixel value in this setting')
flags.DEFINE_boolean("tf_allow_growth", False, \
Example #19
A simple usage example:
python eval.py
"""
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import app
from tensorflow.python.platform import flags

from . import data_provider
from . import common_flags

FLAGS = flags.FLAGS
common_flags.define()

# yapf: disable
flags.DEFINE_integer('num_batches', 100,
                     'Number of batches to run eval for.')

flags.DEFINE_string('eval_log_dir', '/tmp/attention_ocr/eval',
                    'Directory where the evaluation results are saved to.')

flags.DEFINE_integer('eval_interval_secs', 60,
                     'Frequency in seconds to run evaluations.')

flags.DEFINE_integer('number_of_steps', None,
                     'Number of times to run evaluation.')


# yapf: enable


def main(_):
Example #20
import tflib as lib
import tflib.load_lsun
import tflib.load_celebA
import tflib.small_imagenet
import tflib.save_images
import tflib.plot
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.ops.batchnorm
import tflib.ops.deconv2d
import tflib.ops.layernorm

flags.DEFINE_string( "mode", "regularized_gan", "MODE: regularized_gan, gan, wgan, wgan-gp, lsgan")
flags.DEFINE_string( "architecture", "None", "choice of architecture - see GeneratorAndDiscriminator()")
flags.DEFINE_string( "dataset", "celebA", "name of the dataset [ImageNet, lsun, celebA]")
flags.DEFINE_integer("n_gpus", 1, "number of gpus to use")
flags.DEFINE_integer("iters", 100000, "How many iterations to train for")
flags.DEFINE_float(  "gamma", 0.1, "noise variance for regularizer [0.1]")
flags.DEFINE_boolean("annealing", False, "annealing gamma_0 to decay_factor*gamma_0 [False]")
flags.DEFINE_float(  "decay_factor", 0.01, "exponential annealing decay rate [0.01]")
flags.DEFINE_boolean("unreg", False, "turn regularization off when in regularized_gan mode.")
flags.DEFINE_float(  "disc_learning_rate", 0.0002, "(initial) learning rate.")
flags.DEFINE_float(  "gen_learning_rate", 0.0002, "(initial) learning rate.")
flags.DEFINE_integer("disc_update_steps", 1, "discriminator update steps.")
flags.DEFINE_integer("batch_size", 64, "batch size [64]")
flags.DEFINE_string( "root_dir", "RUN_STATS", "root directory [RUN_STATS]")
flags.DEFINE_string( "checkpoint_dir", "None", "directory to load the checkpoints from [None]")
FLAGS = flags.FLAGS


def main():
Example #21
            report.train_clean_train_adv_eval = acc
        return report


def main(argv=None):
    mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   clean_train=FLAGS.clean_train,
                   backprop_through_attack=FLAGS.backprop_through_attack,
                   nb_filters=FLAGS.nb_filters)


if __name__ == '__main__':

    flags.DEFINE_integer('nb_filters', 64, 'Model size multiplier')

    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')

    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')

    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')

    flags.DEFINE_bool('clean_train', True, 'Train on clean examples')

    flags.DEFINE_bool('backprop_through_attack', False,
                      ('If True, backprop through adversarial example '
                       'construction process during adversarial training'))

    tf.app.run()
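The `backprop_through_attack` flag controls whether gradients flow through the adversarial-example construction during adversarial training. A minimal sketch of the pattern, following the cleverhans tutorials this example appears to come from (`fgsm` stands for an attack object such as FastGradientMethod; treat the exact calls as an assumption):

adv_x = fgsm.generate(x, eps=0.3, clip_min=0., clip_max=1.)
if not backprop_through_attack:
    # Treat the attack as fixed preprocessing: stop gradients so training
    # does not differentiate through the example-construction graph.
    adv_x = tf.stop_gradient(adv_x)
preds_adv = model.get_probs(adv_x)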
Example #22
        # choose the method of preprocess image
        preprocess_image = preprocess_image_1

        train_start = 0
        train_end = 73257
        test_start = 0
        test_end = 26032

        # Get SVHN test data
        X_train, Y_train, X_test, Y_test = data_svhn(train_start=train_start,
                                                     train_end=train_end,
                                                     test_start=test_start,
                                                     test_end=test_end,
                                                     preprocess=preprocess_image)
        sample = X_test[198:199]
        imsave(FLAGS.sample, deprocess_image_1(sample))
    cw(datasets=FLAGS.datasets,
       sample=FLAGS.sample,
       model_name=FLAGS.model,
       target=FLAGS.target,
       store_path=FLAGS.store_path)


if __name__ == '__main__':
    flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')
    flags.DEFINE_string('sample', '../datasets/integration/mnist/2.png', 'The path to load sample.')
    flags.DEFINE_string('model', 'lenet1', 'The name of model.')
    flags.DEFINE_integer('target', 2, 'target')
    flags.DEFINE_string('store_path', '../mt_result/integration/cw/mnist', 'The path to store adversaries.')

    tf.app.run()
Example #23
        feed_dict = {x: adv}
        probabilities = sess.run(preds, feed_dict)
        print(probabilities)

        # Save adversarial image
        two_d_img = (np.reshape(adv, (28, 28)) * 255).astype(np.uint8)
        from PIL import Image
        save_image = Image.fromarray(two_d_img)
        save_image = save_image.convert('RGB')
        save_image.save("cw_attack_res.png")

        # Close TF session
        sess.close()
    return


def main(argv=None):
    mnist_tutorial_cw(nb_classes=FLAGS.nb_classes,
                      attack_iterations=FLAGS.attack_iterations,
                      targeted=FLAGS.targeted)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
    flags.DEFINE_integer('attack_iterations', 100,
                         'Number of iterations to run attack; 1000 is good')
    flags.DEFINE_boolean('targeted', True,
                         'Run the tutorial in targeted mode?')

    tf.app.run()
Example #24
 an equal separation distribution of the video images. Implementation supports Optical Flow
 (currently OpenCV's calcOpticalFlowFarneback) as an additional 4th channel.
"""

from tensorflow.python.platform import gfile
from tensorflow.python.platform import flags
from tensorflow.python.platform import app
import cv2 as cv2
import numpy as np
import math
import os
import tensorflow as tf
import time

FLAGS = flags.FLAGS
flags.DEFINE_integer('n_videos_in_record', 10,
                     'Number of videos stored in one single tfrecord file')
flags.DEFINE_string(
    'image_color_depth', "uint8",
    'Color depth as string for the images stored in the tfrecord files. '
    'Has to correspond to the source video color depth. '
    'Specified as dtype (e.g. uint8 or uint16)')
flags.DEFINE_string('file_suffix', "*.mp4",
                    'defines the video file type, e.g. .mp4')

flags.DEFINE_string('source', './example/input', 'Directory with video files')
flags.DEFINE_string('destination', './example/output',
                    'Directory for storing tf records')
flags.DEFINE_boolean(
    'optical_flow', True,
    'Indicates whether optical flow shall be computed and added as fourth '
    'channel.')
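The docstring above names OpenCV's Farneback dense optical flow as the source of the fourth channel. A minimal sketch of computing it between two consecutive frames (the parameter values are commonly used defaults, not taken from this example):

import cv2
import numpy as np

def flow_channel(prev_frame, next_frame):
    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    next_gray = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)
    # Returns an (H, W, 2) array of per-pixel (dx, dy) displacements.
    flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    # Collapse to one magnitude channel, usable as a 4th image channel.
    return np.linalg.norm(flow, axis=2).astype(np.float32)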
Example #25
# ERROR : Could not create cudnn handle: CUDNN_STATUS_NOT_INITIALIZED
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.allow_soft_placement = True
tf_config.log_device_placement = False
#tf_config.gpu_options.per_process_gpu_memory_fraction=0.95
#tf_config.gpu_options.allocator_type = 'BFC'
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=tf_config))

FLAGS = flags.FLAGS
common_flags.define()

# yapf: disable
flags.DEFINE_integer('task', 0,
                     'The Task ID. This value is used when training with '
                     'multiple workers to identify each worker.')

flags.DEFINE_integer('ps_tasks', 0,
                     'The number of parameter servers. If the value is 0, then'
                     ' the parameters are handled locally by the worker.')

flags.DEFINE_integer('save_summaries_secs', 60,
                     'The frequency with which summaries are saved, in '
                     'seconds.')

flags.DEFINE_integer('save_interval_secs', 600,
                     'Frequency in seconds of saving the model.')

flags.DEFINE_integer('max_number_of_steps', int(1e10),
                     'The maximum number of gradient steps.')
Example #26
from tqdm import tqdm

import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
import tensorflow.contrib.slim as slim

import input_data
import models
import utils as u

FLAGS = flags.FLAGS

flags.DEFINE_integer(
    'num_labeled', 100,
    'Number of labeled samples to use for training. (None = all labeled samples)'
)
flags.DEFINE_integer('batch_size', 100, 'Number of samples used per batch.')
flags.DEFINE_integer('num_iters', 1000000, 'Number of training steps.')
flags.DEFINE_integer('eval_interval', 1000,
                     'Number of steps between evaluations.')
flags.DEFINE_float('learning_rate', 0.001,
                   'Initial learning rate for optimizer.')
flags.DEFINE_float('lr_decay_steps', 5000,
                   'Interval of steps for learning rate decay.')
flags.DEFINE_float('lr_decay_factor', 0.33,
                   'Learning rate exponential decay factor.')


def main(_):
    data_tr, labels_tr, data_te, labels_te, unlabeled = input_data.load_mnist(
Example #27
import numpy as np
import pickle
import random
import tensorflow as tf

from data_generator import DataGenerator
from maml import MAML
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS

## Dataset/method options
flags.DEFINE_string('datasource', 'sinusoid',
                    'sinusoid or omniglot or miniimagenet')
flags.DEFINE_integer(
    'num_classes', 5,
    'number of classes used in classification (e.g. 5-way classification).')
# oracle means task id is input (only suitable for sinusoid)
flags.DEFINE_string('baseline', None, 'oracle, or None')

## Training options
flags.DEFINE_integer('pretrain_iterations', 0,
                     'number of pre-training iterations.')
flags.DEFINE_integer(
    'metatrain_iterations', 15000,
    'number of metatraining iterations.')  # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 25,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'update_batch_size', 5,
Example #28
  # Run an evaluation of our model against fgsm
  total = 0
  correct = 0
  for xs, ys in test_loader:
    adv_preds = sess.run(adv_preds_op, feed_dict={x_op: xs})
    correct += (np.argmax(adv_preds, axis=1) == ys).sum()
    total += len(xs)

  acc = float(correct) / total
  print('Adv accuracy: {:.3f}'.format(acc * 100))
  report.clean_train_adv_eval = acc
  return report


def main(_=None):
  mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
                 batch_size=FLAGS.batch_size,
                 learning_rate=FLAGS.learning_rate)


if __name__ == '__main__':
  flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                       'Number of epochs to train model')
  flags.DEFINE_integer('batch_size', BATCH_SIZE,
                       'Size of training batches')
  flags.DEFINE_float('learning_rate', LEARNING_RATE,
                     'Learning rate for training')

  tf.app.run()
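The evaluation loop above feeds batches from a PyTorch data loader into a TF session, which matches the cleverhans PyTorch tutorial; `adv_preds_op` is presumably built by wrapping the PyTorch model as a TF callable. A sketch of how such a graph is typically assembled (names follow the cleverhans v2-era API and the surrounding script's variables; treat them as assumptions):

from cleverhans.attacks import FastGradientMethod
from cleverhans.model import CallableModelWrapper
from cleverhans.utils_pytorch import convert_pytorch_model_to_tf

tf_model_fn = convert_pytorch_model_to_tf(torch_model)  # torch_model: a trained nn.Module
cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')
fgsm_op = FastGradientMethod(cleverhans_model, sess=sess)
adv_x_op = fgsm_op.generate(x_op, eps=0.3, clip_min=0., clip_max=1.)
adv_preds_op = tf_model_fn(adv_x_op)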
Example #29
    # Close TF session
    sess.close()

    # Finally, block & display a grid of all the adversarial examples
    if viz_enabled:
        import matplotlib.pyplot as plt
        plt.close(figure)
        _ = grid_visual(grid_viz_data)

    return report


def main(argv=None):
    mnist_tutorial_jsma(viz_enabled=FLAGS.viz_enabled,
                        nb_epochs=FLAGS.nb_epochs,
                        batch_size=FLAGS.batch_size,
                        nb_classes=FLAGS.nb_classes,
                        source_samples=FLAGS.source_samples,
                        learning_rate=FLAGS.learning_rate)


if __name__ == '__main__':
    flags.DEFINE_boolean('viz_enabled', False, 'Visualize adversarial ex.')
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_integer('nb_classes', 10, 'Number of output classes')
    flags.DEFINE_integer('source_samples', 600, 'Nb of test inputs to attack')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')

    tf.app.run()
Example #30
import numpy as np
from scipy.misc import imsave
import matplotlib.pyplot as plt
from easydict import EasyDict

from utils import ReplayBuffer
from torch.optim import Adam, SGD
import torch.multiprocessing as mp
import torch.distributed as dist

from torch.nn.parallel import DistributedDataParallel as DDP

FLAGS = flags.FLAGS

# Distributed training hyperparameters
flags.DEFINE_integer('nodes', 1,
    'number of nodes for training')
flags.DEFINE_integer('gpus', 1,
    'number of gpus per nodes')
flags.DEFINE_integer('node_rank', 0,
    'rank of node')

# Configurations for distributed training
flags.DEFINE_string('master_addr', '8.8.8.8',
    'address of communicating server')
flags.DEFINE_string('port', '10002',
    'port of training')
flags.DEFINE_bool('slurm', False,
    'whether we are on slurm')
flags.DEFINE_bool('repel_im', True,
    'maximize entropy by repeling images from each other')
flags.DEFINE_bool('hmc', False,