Example #1
def main(model_name, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_mnist_flags()

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()

    data_gen = data_gen_mnist(X_train)

    x = K.placeholder(
        (None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))

    y = K.placeholder(shape=(None, FLAGS.NUM_CLASSES))

    model = model_mnist(type=model_type)

    # Train an MNIST model
    tf_train(x, y, model, X_train, Y_train, data_gen)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    print('Test error: %.1f%%' % test_error)
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
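A natural counterpart to the JSON dump above is reloading the architecture; a minimal sketch (a hypothetical helper, assuming the same Keras the example imports; weights still need to be restored separately):

from keras.models import model_from_json

def load_model_from_json(model_name):
    # Rebuild the architecture written by model.to_json(); this restores
    # structure only, not weights.
    with open(model_name + '.json') as f:
        return model_from_json(f.read())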
Example #2
def define_common_flags():
    """Define common flags."""
    # common flags
    flags.DEFINE_integer('batch_size', 1, 'Batch size.')
    flags.DEFINE_integer('crop_width', None,
                         'Width of the central crop for images.')
    flags.DEFINE_integer('crop_height', None,
                         'Height of the central crop for images.')
    flags.DEFINE_string(
        'train_log_dir',
        'my_logs',  # default: logs
        'Directory where to write event logs.')
    flags.DEFINE_string('dataset_name', 'van',
                        'Name of the dataset. Supported: fsns')
    flags.DEFINE_string('model_name', 'model', 'Name of the model.')
    flags.DEFINE_string(
        'split_name', 'train',
        'Dataset split name to run evaluation for: test,train.')
    flags.DEFINE_string('data_root', None, 'Data root folder.')
    flags.DEFINE_string('checkpoint', '',
                        'Path for checkpoint to restore weights from.')
    flags.DEFINE_string('master', '',
                        'BNS name of the TensorFlow master to use.')
    flags.DEFINE_bool('do_augment', False, '')

    # Model hyper parameters
    flags.DEFINE_float('learning_rate', 0.004, 'learning rate')
    flags.DEFINE_string('optimizer', 'momentum', 'the optimizer to use')
    flags.DEFINE_float('momentum', 0.9,
                       'momentum value for the momentum optimizer if used')
    flags.DEFINE_bool('use_augment_input', True,
                      'If True will use image augmentation')

    # Method hyper parameters
    # conv_tower_fn
    flags.DEFINE_string('final_endpoint', 'Mixed_5d',
                        'Endpoint to cut inception tower')

    # sequence_logit_fn
    flags.DEFINE_bool('use_attention', True,
                      'If True will use the attention mechanism')
    flags.DEFINE_bool('use_autoregression', True,
                      'If True will use autoregression (a feedback link)')
    flags.DEFINE_integer('num_lstm_units', 256,
                         'number of LSTM units for sequence LSTM')
    flags.DEFINE_float('weight_decay', 0.00004,
                       'weight decay for char prediction FC layers')
    flags.DEFINE_float(
        'lstm_state_clip_value', 10.0,
        'cell state is clipped by this value prior to the cell output activation'
    )

    # 'sequence_loss_fn'
    flags.DEFINE_float('label_smoothing', 0.1, 'weight for label smoothing')
    flags.DEFINE_bool('ignore_nulls', True,
                      'ignore null characters for computing the loss')
    flags.DEFINE_bool('average_across_timesteps', False,
                      'divide the returned cost by the total label weight')
    flags.DEFINE_bool('use_location', False,
                      'If true will use location attention')
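These calls only register defaults; values are read back from flags.FLAGS after command-line parsing. A minimal usage sketch (an assumption about the surrounding script, mirroring the pattern in Example #11):

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS


def main(_):
    # app.run() parses sys.argv before calling main, so defaults and any
    # --flag overrides are visible here.
    print('batch_size:', FLAGS.batch_size)
    print('optimizer:', FLAGS.optimizer)


if __name__ == '__main__':
    define_common_flags()
    app.run()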
Example #3
def define():
    """Define common FLAGS."""
    # yapf: disable  # apparently an open-source Google tool for tidying Python code
    flags.DEFINE_integer('batch_size', 32, 'Batch size.')
    flags.DEFINE_integer('crop_width', None, 'Width of the central crop for images.')
    flags.DEFINE_integer('crop_height', None, 'Height of the central crop for images.')
    flags.DEFINE_string('train_log_dir', 'tmp/attention_ocr/train', 'Directory where logs are saved.')
    flags.DEFINE_string('dataset_name', 'fsns', 'Name of the dataset. Supported: fsns.')
    flags.DEFINE_string('split_name', 'train', 'Dataset split name to run evaluation for: test, train.')
    flags.DEFINE_string('dataset_dir', None, 'Dataset root folder.')
    flags.DEFINE_string('checkpoint', '', 'Path for checkpoint to restore weights from.')
    flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')


    # Hyperparameters
    flags.DEFINE_float('learning_rate', 0.004, 'learning rate')
    flags.DEFINE_string('optimizer', 'momentum', 'the optimizer to use')
    flags.DEFINE_float('momentum', 0.9, 'momentum value for the momentum optimizer if used')
    flags.DEFINE_bool('use_augment_input', True, 'If True will use image augmentation')


    # Method hyperparameters
    # conv_tower_fn
    flags.DEFINE_string('final_endpoint', 'Mixed_5d', 'Endpoint to cut inception tower')

    # sequence_logit_fn
    flags.DEFINE_bool('use_attention', True, 'If True will use the attention mechanism')
    flags.DEFINE_bool('use_autoregression', True, 'If True will use autoregression (a feedback link)')
    flags.DEFINE_integer('num_lstm_units', 256, 'number of LSTM units for sequence LSTM')
    flags.DEFINE_float('weight_decay', 0.00004, 'weight decay for char prediction FC layers')
    flags.DEFINE_float('lstm_state_clip_value', 10.0, 'cell state is clipped by this value prior to the cell output activation')


    # sequence_loss_fn
    flags.DEFINE_float('label_smoothing', 0.1, 'weight for label smoothing')
    flags.DEFINE_bool('ignore_nulls', True, 'ignore null characters when computing the loss')
    flags.DEFINE_bool('average_across_timesteps', False, 'divide the returned cost by the total label weight')
Example #4
def main(model_name, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_mnist_flags()
    
    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()

    # Initialize substitute training set reserved for adversary
    X_sub = X_test[:300]
    Y_sub = np.argmax(Y_test[:300], axis=1)

    # Redefine test set as remaining samples unavailable to adversaries
    X_test = X_test[300:]
    Y_test = Y_test[300:]

    x = K.placeholder((None,
                       FLAGS.IMAGE_ROWS,
                       FLAGS.IMAGE_COLS,
                       FLAGS.NUM_CHANNELS
                       ))

    y = K.placeholder(shape=(None, FLAGS.NUM_CLASSES))

    # Load Black-Box model
    model = load_model(blackbox_name)
    prediction = model(x)

    train_sub_out = train_sub(K.get_session(), x, y, prediction, X_sub, Y_sub,
                              nb_classes=FLAGS.NUM_CLASSES, nb_epochs_s=args.epochs,
                              batch_size=FLAGS.BATCH_SIZE, learning_rate=0.001,
                              data_aug=6, lmbda=0.1, model_type=model_type)
    model_sub, preds_sub = train_sub_out
    eval_params = {
        'batch_size': FLAGS.BATCH_SIZE
    }

    # Finally print the result!
    # test_error = tf_test_error_rate(model_sub, x, X_test, Y_test)
    accuracy = model_eval(K.get_session(), x, y, preds_sub, X_test, Y_test, args=eval_params)
    print('Test accuracy of substitute on legitimate samples: %.3f%%' % accuracy)

    save_model(model_sub, model_name)
    json_string = model_sub.to_json()
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
Example #5
def main(model_name, adv_model_names, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_mnist_flags()

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()

    data_gen = data_gen_mnist(X_train)

    x = K.placeholder(shape=(None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS,
                             FLAGS.NUM_CHANNELS))

    y = K.placeholder(shape=(FLAGS.BATCH_SIZE, FLAGS.NUM_CLASSES))

    eps = args.eps

    # if src_models is not None, we train on adversarial examples that come
    # from multiple models
    adv_models = [None] * len(adv_model_names)
    for i in range(len(adv_model_names)):
        adv_models[i] = load_model(adv_model_names[i])

    model = model_mnist(type=model_type)

    x_advs = [None] * (len(adv_models) + 1)

    for i, m in enumerate(adv_models + [model]):
        logits = m(x)
        grad = gen_grad(x, logits, y, loss='training')
        x_advs[i] = symbolic_fgs(x, grad, eps=eps)

    # Train an MNIST model
    tf_train(x, y, model, X_train, Y_train, data_gen, x_advs=x_advs)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    print('Test error: %.1f%%' % test_error)
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
Example #6
def main():
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get GTSRB data
    x_train, y_train, _, _, x_test, y_test = load_dataset_GTSRB(
        n_channel=N_CHANNEL, train_file_name=TRAIN_FILE_NAME)

    # Convert to one-hot encoding
    y_train = keras.utils.to_categorical(y_train, NUM_LABELS)
    y_test = keras.utils.to_categorical(y_test, NUM_LABELS)

    x = K.placeholder(shape=(None, HEIGHT, WIDTH, N_CHANNEL))
    y = K.placeholder(shape=(BATCH_SIZE, NUM_LABELS))

    eps = args.eps
    x_advs = [None]

    model = build_mltscl()

    if args.iter == 0:
        logits = model(x)
        grad = gen_grad(x, logits, y, loss='training')
        x_advs = symbolic_fgs(x, grad, eps=eps)
    elif args.iter == 1:
        x_advs = symb_iter_fgs(model, x, y, steps=40, alpha=0.01, eps=args.eps)

    # Train a GTSRB model
    tf_train(x, y, model, x_train, y_train, x_advs=x_advs, benign=args.ben)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, x_test, y_test)
    print(test_error)

    # Specify model name
    model_name = './tmp/multiscale_adv'
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
Example #7
common_flags.define()

flags.DEFINE_string('export_dir', None, 'Directory to export model files to.')
flags.DEFINE_integer(
    'image_width', None,
    'Image width used during training (or crop width if used).'
    ' If not set, the dataset default is used instead.')
flags.DEFINE_integer(
    'image_height', None,
    'Image height used during training (or crop height if used).'
    ' If not set, the dataset default is used instead.')
flags.DEFINE_string('work_dir', '/tmp',
                    'A directory to store temporary files.')
flags.DEFINE_integer('version_number', 1, 'Version number of the model')
flags.DEFINE_bool(
    'export_for_serving', True,
    'Whether the exported model accepts serialized tf.Example '
    'protos as input')


def get_checkpoint_path():
    """Returns a path to a checkpoint based on specified commandline flags.

  In order to specify a full path to a checkpoint use --checkpoint flag.
  Alternatively, if --train_log_dir was specified it will return a path to the
  most recent checkpoint.

  Raises:
    ValueError: in case it can't find a checkpoint.

  Returns:
    A string.
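The docstring above fully specifies the lookup order; a minimal sketch of a body consistent with it (an assumption, not the original implementation), reading the --checkpoint and --train_log_dir flags defined in Example #2:

def get_checkpoint_path():
    if FLAGS.checkpoint:
        return FLAGS.checkpoint
    # Otherwise fall back to the most recent checkpoint in train_log_dir.
    checkpoint = tf.train.latest_checkpoint(FLAGS.train_log_dir)
    if checkpoint is None:
        raise ValueError('Cannot find a checkpoint in %s' % FLAGS.train_log_dir)
    return checkpoint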
Example #8
flags.DEFINE_string('root', './data/train/pairs', 'root directory of training pairs')
flags.DEFINE_integer('train_iterations', 5000,
                     'number of training iterations.')
flags.DEFINE_integer('meta_batch_size', 128,
                     'number of images sampled per source domain')
flags.DEFINE_float('inner_lr', 0.001,
                   'step size alpha for inner gradient update on meta-train')
flags.DEFINE_float(
    'outer_lr', 0.001,
    'learning rate for outer updates with (task-loss + meta-loss)')
flags.DEFINE_float(
    'metric_lr', 0.001,
    'learning rate for the metric embedding nn with AdamOptimizer')
flags.DEFINE_float('margin', 10, 'distance margin in metric loss')
flags.DEFINE_bool(
    'clipNorm', True,
    'if True, gradients clip by Norm, otherwise, gradients clip by value')
flags.DEFINE_float('gradients_clip_value', 2.0,
                   'clip_by_value for SGD computing new theta at meta loss')

## Logging, saving, and testing options
flags.DEFINE_string('date', '072720', 'log date')
flags.DEFINE_string('exp', 'mc-maml', 'experiment name')
flags.DEFINE_bool('log', True,
                  'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', './log',
                    'directory for summaries and checkpoints.')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_bool('resume', False,
                  'resume training if there is a model available')
flags.DEFINE_integer('summary_interval', 20,
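The clipNorm flag above chooses between norm clipping and value clipping of gradients; a minimal sketch of the two modes (a hypothetical helper, assuming TF 1.x):

import tensorflow as tf


def clip_gradients(grads, clip_value, by_norm):
    if by_norm:
        # Rescale each gradient so its L2 norm is at most clip_value.
        return [tf.clip_by_norm(g, clip_value) for g in grads]
    # Otherwise clamp every element into [-clip_value, clip_value].
    return [tf.clip_by_value(g, -clip_value, clip_value) for g in grads]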
Example #9
import unittest

from tensorflow.python.platform import app
from tensorflow.python.platform import flags

flags.DEFINE_string("string_foo", "default_val", "HelpString")
flags.DEFINE_integer("int_foo", 42, "HelpString")
flags.DEFINE_float("float_foo", 42.0, "HelpString")

flags.DEFINE_boolean("bool_foo", True, "HelpString")
flags.DEFINE_boolean("bool_negation", True, "HelpString")
flags.DEFINE_boolean("bool-dash-negation", True, "HelpString")
flags.DEFINE_boolean("bool_a", False, "HelpString")
flags.DEFINE_boolean("bool_c", False, "HelpString")
flags.DEFINE_boolean("bool_d", True, "HelpString")
flags.DEFINE_bool("bool_e", True, "HelpString")
flags.DEFINE_string("string_foo_required", "default_val", "HelpString")
flags.DEFINE_string("none_string_foo_required", None, "HelpString")

FLAGS = flags.FLAGS


class FlagsTest(unittest.TestCase):
    def testString(self):
        res = FLAGS.string_foo
        self.assertEqual(res, "default_val")
        FLAGS.string_foo = "bar"
        self.assertEqual("bar", FLAGS.string_foo)

    def testBool(self):
        res = FLAGS.bool_foo
Example #10
import os.path as osp
import global_macros
import tensorflow as tf
import numpy as np
from Unet2 import Unet2
from tensorflow.python.platform import flags
from utils import TF2FLRD, print_flag

FLAGS = flags.FLAGS

# Dataset Options:
flags.DEFINE_integer('batch_size', 32, 'Size of a batch')
#flags.DEFINE_bool('single', False, 'whether to debug by training on a single image')

# Base Model class Mandatory:
flags.DEFINE_bool('train', True, 'whether to train or test')
flags.DEFINE_integer('epoch_num', 200, 'Number of Epochs to train on')
flags.DEFINE_integer('resume_iter', -1,
    'iteration to resume training from, -1 means not resuming')
flags.DEFINE_string('ckptdir', osp.join(global_macros.CKPT_ROOT, "Unet2"),
    'location where models will be stored')
flags.DEFINE_string('logdir', osp.join(global_macros.LOGGER_ROOT, "Unet2"),
    'location where log of experiments will be stored')
flags.DEFINE_string('exp', 'exp', 'name of experiments')
flags.DEFINE_integer('log_interval', 10, 'log outputs every so many batches')
flags.DEFINE_integer('save_interval', 50, 'save outputs every so many batches')
## Saver options:
flags.DEFINE_integer('max_to_keep', 30, 'maximum number of models to keep')
flags.DEFINE_integer('keep_checkpoint_every_n_hours', 3, 'checkpoint interval in hours')

# Model specific:
Example #11

def main(argv=None):
    print(cifar_blackbox(nb_classes=FLAGS.nb_classes, batch_size=FLAGS.batch_size,
                         learning_rate=FLAGS.learning_rate,
                         nb_epochs=FLAGS.nb_epochs, holdout=FLAGS.holdout,
                         data_aug=FLAGS.data_aug, nb_epochs_s=FLAGS.nb_epochs_s,
                         lmbda=FLAGS.lmbda))


if __name__ == '__main__':
    # General flags
    flags.DEFINE_integer('nb_classes', 10, 'Number of classes in problem')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.0005, 'Learning rate for training')

    # Flags related to oracle
    flags.DEFINE_integer('nb_epochs', 50, 'Number of epochs to train model')

    # Flags related to substitute
    flags.DEFINE_integer('holdout', 150, 'Test set holdout for adversary')
    flags.DEFINE_integer('data_aug', 6, 'Nb of substitute data augmentations')
    flags.DEFINE_integer('nb_epochs_s', 50, 'Training epochs for substitute')
    flags.DEFINE_float('lmbda', 0.1, 'Lambda from arxiv.org/abs/1602.02697')

    # Flags related to saving/loading
    flags.DEFINE_bool('load_pretrain', False, 'load pretrained model from sub_saved/cifar-model')
    flags.DEFINE_string('train_dir', 'sub_saved', 'model saving path')
    flags.DEFINE_string('filename', 'cifar-model', 'cifar model name')
    app.run()
Example #12
flags.DEFINE_string('walker_weight_envelope', None,
                    'Increase walker weight with an envelope: [None, sigmoid, linear]')

flags.DEFINE_integer('walker_weight_envelope_steps', 100,
                     'Number of steps (after delay) at which envelope '
                     'saturates.')

flags.DEFINE_integer('walker_weight_envelope_delay', 3000,
                     'Number of steps at which envelope starts.')

flags.DEFINE_float('logit_weight', 1.0, 'Weight for logit loss.')

flags.DEFINE_integer('max_steps', 100000, 'Number of training steps.')

flags.DEFINE_bool('augmentation', False,
                  'Apply data augmentation during training.')

flags.DEFINE_integer('new_size', NEW_SIZE,
                     'If > 0, resize image to this width/height.')

flags.DEFINE_integer('virtual_embeddings', 0,
                     'How many virtual embeddings to add.')

flags.DEFINE_string('logdir', LOG_DIR, 'Training log path.')

flags.DEFINE_integer('save_summaries_secs', 150,
                     'How often should summaries be saved (in seconds).')

flags.DEFINE_integer('save_interval_secs', SAVE_INTERVAL_SECS,
                     'How often should checkpoints be saved (in seconds).')
Example #13
flags.DEFINE_integer('pretrain_iterations', 0,
                     'number of pre-training iterations.')
flags.DEFINE_integer('metatrain_iterations', 1000,
                     'number of metatraining iterations.')
flags.DEFINE_integer('meta_batch_size', 8,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'update_batch_size', 5,
    'number of examples used for inner gradient update (K for K-shot learning).'
)
flags.DEFINE_float('update_lr', 1.0,
                   'step size alpha for inner gradient update.')
flags.DEFINE_integer('num_updates', 1,
                     'number of inner gradient updates during training.')
flags.DEFINE_bool('grad_clip', True, 'use gradient clipping')
flags.DEFINE_float('clip_min', -80.0, 'minimum for gradient clipping')
flags.DEFINE_float('clip_max', 80.0, 'maximum for gradient clipping')
flags.DEFINE_bool(
    'stop_grad', False,
    'if True, do not use second derivatives in meta-optimization (for speed)')
flags.DEFINE_bool('aug', True, 'use data augmentation')

## Model options
flags.DEFINE_string('norm', 'layer_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_integer('num_conv_layers', 3, 'number of convolutional layers')
flags.DEFINE_integer('num_filters', 32, 'number of filters for conv nets.')
flags.DEFINE_integer('num_fc_layers', 2, 'number of fully connected layers')
flags.DEFINE_integer('hidden_dim', 40,
                     'hidden dimension of fully connected layers')
flags.DEFINE_bool('fp', True, 'use feature spatial soft-argmax')
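The stop_grad flag names the usual first-order MAML approximation; a minimal sketch of how such a flag is typically applied in the inner gradient update (a hypothetical helper, not code from this example):

import tensorflow as tf


def inner_update(loss, weights, update_lr, stop_grad):
    grads = tf.gradients(loss, list(weights.values()))
    if stop_grad:
        # Treat inner gradients as constants so the meta-gradient skips
        # second derivatives.
        grads = [tf.stop_gradient(g) for g in grads]
    return {name: w - update_lr * g
            for (name, w), g in zip(weights.items(), grads)}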
Example #14
import os
import random
import warnings

import numpy as np
import tensorflow as tf
from scipy import linalg
from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('spec_iter', 1,
                     'Number of iterations to normalize spectrum of matrix')
flags.DEFINE_float('spec_norm_val', 1.0, 'Desired norm of matrices')
flags.DEFINE_bool('downsample', False,
                  'Whether to do average pool downsampling')
flags.DEFINE_bool('spec_eval', False,
                  'Set to true to prevent spectral updates')


def safemean(xs):
    return np.nan if len(xs) == 0 else np.mean(xs)


def make_image(tensor):
    """Convert an numpy representation image to Image protobuf"""
    from PIL import Image
    if len(tensor.shape) == 4:
        _, height, width, channel = tensor.shape
    elif len(tensor.shape) == 3:
        height, width, channel = tensor.shape
Example #15
from tensorflow.contrib import slim
import cv2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import app
from tensorflow.python.platform import flags

from datasets import nav_env
import scripts.script_nav_agent_release as sna
import src.file_utils as fu
from src import graph_utils
from src import utils
FLAGS = flags.FLAGS

flags.DEFINE_string('out_dir', 'vis', 'Directory where to store the output')
flags.DEFINE_string('type', '', 'Optional type.')
flags.DEFINE_bool('first_person', False, 'Visualize the first person view.')
flags.DEFINE_bool('top_view', False, 'Visualize the trajectory in the top view.')
flags.DEFINE_integer('num_steps', 40, 'Number of steps to run the model for.')
flags.DEFINE_string('imset', 'test', '')
flags.DEFINE_string('base_dir', 'output', 'Cache directory.')

def _get_suffix_str():
  return ''


def _load_trajectory():
  base_dir = FLAGS.base_dir
  config_name = FLAGS.config_name+_get_suffix_str()

  dir_name = os.path.join(base_dir, FLAGS.type, config_name)
  tf.logging.info('Waiting for snapshot in directory %s.', dir_name)
Example #16
import sys

from tensorflow.compiler.mlir.tfr.python import composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_array_ops as array_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import flags

Composite = composite.Composite
FLAGS = flags.FLAGS

flags.DEFINE_string(
    'output', None,
    'Path to write the generated register op file and MLIR file.')

flags.DEFINE_bool('gen_register_op', True,
                  'Generate register op cc file or tfr mlir file.')


# The original kernel is defined in 'tensorflow/python/framework/ops_test.py'
# and prints out the current graph def version.
@Composite('TestAttr')
def _override_test_attr_op():
    ret = array_ops.Const(value=100.0, dtype=dtypes.float32)
    return ret


def main(_):
    if FLAGS.gen_register_op:
        assert FLAGS.output.endswith('.cc')
        generated_code = gen_register_op(sys.modules[__name__], '_override_')
    else:
Example #17
import datetime
import os
from functools import wraps

import numpy as np
import tensorflow.google as tf

from model import DRNN
from utils import output_to_sequence
from utils import get_edit_distance
from utils import load_batched_data
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
flags.DEFINE_string('master', 'local',
                    """BNS name of the TensorFlow runtime to use.""")
flags.DEFINE_bool('is_training', True, 'set whether to train or test')
flags.DEFINE_boolean(
    'restore', False,
    'set whether to restore a model, when test mode, keep should be set to True'
)
flags.DEFINE_string('level', 'phn', 'set the task level, phn, cha.')

flags.DEFINE_string('cell', 'LSTM', 'set the rnncell to use, GRU, LSTM...')
flags.DEFINE_string('activation', 'tanh',
                    'set the activation to use, sigmoid, tanh, relu, elu...')

flags.DEFINE_integer('batch_size', 32, 'set the batch size')
flags.DEFINE_integer('num_hidden', 1024, 'set the hidden size of rnn cell')
flags.DEFINE_bool('use_peepholes', True, 'set whether to use peephole')
flags.DEFINE_integer('feature_length', 39, 'set the size of input feature')
flags.DEFINE_integer('num_classes', 62, 'set the number of output classes')
Example #18
# oracle means task id is input (only suitable for sinusoid)
flags.DEFINE_string('baseline', None, 'oracle, or None')

## Training options
flags.DEFINE_integer('pretrain_iterations', 0, 'number of pre-training iterations.')
flags.DEFINE_integer('metatrain_iterations', 100,
                     'number of metatraining iterations.')  # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 1, 'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 5,
                     'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('update_lr', 1e-3, 'step size alpha for inner gradient update.')  # 0.1 for omniglot
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.')

## Model options
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')

## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('datadir', '/home/ml1323/project/robert_data/DISFA/new_dataset/train/au0/', 'directory for data.')
flags.DEFINE_string('logdir', '/tmp/data', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('num_test_pts', 1, 'number of iteration to increase the test points')
flags.DEFINE_bool('test_set', False, 'Set to true to test on the test set, False for the validation set.')
flags.DEFINE_integer('subject_idx', 0, 'subject index to test')
flags.DEFINE_integer('train_update_batch_size', -1,
                     'number of examples used for gradient update during training (use if you want to test with a different number).')
flags.DEFINE_float('train_update_lr', -1,
                   'value of inner gradient step during training. (use if you want to test with a different value)')  # 0.1 for omniglot
Example #19
import numpy as np

from utils import print_all_var, save_train_batch, recreate_dir
from utils import get_recursive_file_name, show_use_time, get_files_in_dir

from load_data import DataLoader
from im_network_one_gif import BehaviorClone
from config import cfg

_TRAIN_DATA = '../train_data_diff_color_0522/train_data/object_0'
_VALID_DATA = '../train_data_diff_color_0522/valid_data/object_0'
_EPOCHS = 1000
_PRINT_STEP = 100

FLAGS = flags.FLAGS
flags.DEFINE_bool('drop_out', False,
                  'if True, use dropout for fc (fully connected) layers')
flags.DEFINE_string('log_dir', 'log/', 'log directory')
flags.DEFINE_string('model_dir', 'checkpoints/', 'model directory')

summary_writer = tf.summary.FileWriter(FLAGS.log_dir)


def get_trainable_dic():
    # print('*******get_trainable_dic*********')
    all_w_b = dict()
    for v in tf.trainable_variables():
        name = v.name.replace(":0", "")
        all_w_b[name] = v
        print('{}->{}'.format(name, v))
    print()
    return all_w_b
Example #20
from filters import stride_3

torch.manual_seed(0)
np.random.seed(0)
tf.set_random_seed(0)

FLAGS = flags.FLAGS


# Dataset Options
flags.DEFINE_string('datasource', 'random',
    'initialization for chains, either random or default (decorruption)')
flags.DEFINE_string('dataset', 'cubes',
    'concept combination (cubes, pairs, pos, continual, color, or cross right now)')
flags.DEFINE_integer('batch_size', 16, 'Size of inputs')
flags.DEFINE_bool('single', False, 'whether to debug by training on a single image')
flags.DEFINE_integer('data_workers', 4,
    'Number of different data workers to load data in parallel')
flags.DEFINE_integer('cond_idx', 0, 'By default, train conditional models on conditioning on position')

# General Experiment Settings
flags.DEFINE_string('logdir', 'cachedir',
    'location where log of experiments will be stored')
flags.DEFINE_string('exp', 'default', 'name of experiments')
flags.DEFINE_integer('log_interval', 10, 'log outputs every so many batches')
flags.DEFINE_integer('save_interval', 1000, 'save outputs every so many batches')
flags.DEFINE_integer('test_interval', 1000, 'evaluate outputs every so many batches')
flags.DEFINE_integer('resume_iter', -1, 'iteration to resume training from')
flags.DEFINE_bool('train', True, 'whether to train or test')
flags.DEFINE_integer('epoch_num', 10000, 'Number of Epochs to train on')
flags.DEFINE_float('lr', 3e-4, 'Learning rate for training')
Example #21
        # You should get exactly the same result for both clean and
        # adversarial accuracy as you get within this program.

    # Calculate training errors
    if testing:
        do_eval(preds2, x_train, y_train, 'train_adv_train_clean_eval')
        do_eval(preds2_adv, x_train, y_train, 'train_adv_train_adv_eval')

    return report


def main(argv=None):
    mnist_tutorial(nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   clean_train=FLAGS.clean_train,
                   backprop_through_attack=FLAGS.backprop_through_attack,
                   nb_filters=FLAGS.nb_filters)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', 64, 'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_bool('clean_train', True, 'Train on clean examples')
    flags.DEFINE_bool('backprop_through_attack', False,
                      ('If True, backprop through adversarial example '
                       'construction process during adversarial training'))

    tf.app.run()
Example #22
                     'seconds.')

flags.DEFINE_integer('save_interval_secs', 600,
                     'Frequency in seconds of saving the model.')

flags.DEFINE_integer('max_number_of_steps', int(1e10),
                     'The maximum number of gradient steps.')

flags.DEFINE_string('checkpoint_inception', '',
                    'Checkpoint to recover inception weights from.')

flags.DEFINE_float('clip_gradient_norm', 2.0,
                   'If greater than 0 then the gradients would be clipped by '
                   'it.')

flags.DEFINE_bool('sync_replicas', False,
                  'If True will synchronize replicas during training.')

flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of gradients updates before updating params.')

flags.DEFINE_integer('total_num_replicas', 1,
                     'Total number of worker replicas.')

flags.DEFINE_integer('startup_delay_steps', 15,
                     'Number of training steps between replicas startup.')

flags.DEFINE_boolean('reset_train_dir', False,
                     'If true will delete all files in the train_log_dir')

flags.DEFINE_boolean('show_graph_stats', False,
                     'Output model size stats to stderr.')
Example #23
# oracle means task id is input (only suitable for sinusoid)
flags.DEFINE_string('baseline', None, 'oracle, or None')

## Training options
flags.DEFINE_integer('pretrain_iterations', 0, 'number of pre-training iterations.')
flags.DEFINE_integer('metatrain_iterations', 15000, 'number of metatraining iterations.') # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 25, 'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 5, 'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('update_lr', 1e-3, 'step size alpha for inner gradient update.') # 0.1 for omniglot
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.')

## Model options
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_integer('num_filters', 64, 'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')

## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', '/tmp/data', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('test_set', False, 'Set to true to test on the test set, False for the validation set.')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training (use if you want to test with a different number).')
flags.DEFINE_float('train_update_lr', -1, 'value of inner gradient step during training. (use if you want to test with a different value)') # 0.1 for omniglot


Example #24
def main():
                :nb_samples], Y_test[:nb_samples], args=eval_par)
            t2 = time.time()
            print('Test accuracy on adversarial examples %0.4f\n' % acc)
        print("Took", t2 - t1, "seconds")


if __name__ == '__main__':

    if "CIFAR10_CHALLENGE_DIR" in os.environ:
        cifar10_root = os.environ['CIFAR10_CHALLENGE_DIR']
    default_ckpt_dir = os.path.join(cifar10_root, 'models/adv_trained')
    default_data_dir = os.path.join(cifar10_root, 'cifar10_data')

    flags.DEFINE_integer('batch_size', 100, "Batch size")

    flags.DEFINE_integer('nb_samples', 1000, "Number of samples to test")

    flags.DEFINE_string('attack_type', 'fgsm', ("Attack type: 'fgsm'->'fast "
                                                "gradient sign method', "
                                                "'pgd'->'projected "
                                                "gradient descent', 'cwl2'->"
                                                "'Carlini & Wagner L2'"))
    flags.DEFINE_string('checkpoint_dir', default_ckpt_dir,
                        'Checkpoint directory to load')

    flags.DEFINE_string('dataset_dir', default_data_dir, 'Dataset directory')

    flags.DEFINE_bool('sweep', False, 'Sweep epsilon or single epsilon?')

    app.run(main)
Example #25
flags.DEFINE_integer('meta_train_iterations', 15000,
                     'number of meta-training iterations.')
# batch size during each step of meta-update (testing, validation, training)
flags.DEFINE_integer('meta_batch_size', 25,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'k_shot', 1,
    'number of examples used for inner gradient update (K for K-shot learning).'
)
flags.DEFINE_float('inner_update_lr', 0.4,
                   'step size alpha for inner gradient update.')
flags.DEFINE_integer('num_inner_updates', 1,
                     'number of inner gradient updates during meta-training.')
flags.DEFINE_integer('num_filters', 16, 'number of filters for conv nets.')
flags.DEFINE_bool('learn_inner_update_lr', False,
                  'learn the per-layer update learning rate.')

## Logging, saving, and testing options
flags.DEFINE_string('data_path', './omniglot_resized', 'path to the dataset.')
flags.DEFINE_bool('log', True,
                  'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', '/tmp/data',
                    'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False,
                  'resume training if there is a model available')
flags.DEFINE_bool('meta_train', True,
                  'True to meta-train, False to meta-test.')
flags.DEFINE_integer('meta_test_iter', -1,
                     'iteration to load model (-1 for latest model)')
flags.DEFINE_bool(
    'meta_test_set', False,
Example #26
    str(FLAGS.dataset_number), 'videos')
flags.DEFINE_string(
    'scale_dir',
    '/home/mtheofanidis/catkin_ws/src/Data/Unreal-Dataset/scale_and_bias_%s.pkl'
    % FLAGS.experiment, 'storage')
flags.DEFINE_string(
    'test_dir',
    '/home/mtheofanidis/catkin_ws/src/Data/Unreal-Dataset/Dataset_1',
    'testing demos')
flags.DEFINE_string('temp_dir', None, 'temporary directory for testing')
flags.DEFINE_string(
    'gif_prefix', 'object',
    'prefix of the video directory for each task, e.g. object_0 for task 0')
flags.DEFINE_integer('restore_iter', 0,
                     'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('hsv', False, 'convert the image to HSV format')
flags.DEFINE_bool('use_noisy_demos', False,
                  'use noisy demonstrations or not (for domain shift)')

## Logging, saving, and testing options
flags.DEFINE_bool('log', True,
                  'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('log_dir',
                    '/home/mtheofanidis/catkin_ws/src/network/tmp/data',
                    'summaries and checkpoints.')
flags.DEFINE_bool('resume', True,
                  'resume training if there is a model available')
flags.DEFINE_bool('train', False, 'True to train, False to test.')
flags.DEFINE_integer('test_update_batch_size', 1,
                     'number of demos used during test time')
flags.DEFINE_float('gpu_memory_fraction', 0.5,
Example #27
        do_eval(preds2_adv, x_train, y_train, 'train_adv_train_adv_eval')

    return report


def main(argv=None):
    from cleverhans_tutorials import check_installation
    check_installation(__file__)

    mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   clean_train=FLAGS.clean_train,
                   backprop_through_attack=FLAGS.backprop_through_attack,
                   nb_filters=FLAGS.nb_filters)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', NB_FILTERS, 'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
    flags.DEFINE_float('learning_rate', LEARNING_RATE,
                       'Learning rate for training')
    flags.DEFINE_bool('clean_train', CLEAN_TRAIN, 'Train on clean examples')
    flags.DEFINE_bool('backprop_through_attack', BACKPROP_THROUGH_ATTACK,
                      ('If True, backprop through adversarial example '
                       'construction process during adversarial training'))

    tf.app.run()
Example #28
    report.adv_train_adv_eval = pgd_acc

    # Save model
    if save:
        model_path = "models/zero_knowledge_gandef"
        vars_to_save = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope='model_zero_knowledge_gandef*')
        assert len(vars_to_save) > 0
        saver = tf.train.Saver(var_list=vars_to_save)
        saver.save(sess, model_path)
        print('Model saved\n')
    else:
        print('Model not saved\n')


def main(argv=None):
    train_zero_knowledge_gandef_model(
        smoke_test=FLAGS.smoke_test,
        save=FLAGS.save,
        backprop_through_attack=FLAGS.backprop_through_attack)


if __name__ == '__main__':
    flags.DEFINE_bool('smoke_test', False, 'Smoke test')
    flags.DEFINE_bool('save', True, 'Save model')
    flags.DEFINE_bool('backprop_through_attack', False,
                      ('If True, backprop through adversarial example '
                       'construction process during adversarial training'))

    tf.app.run()
Example #29
# How often to run a batch through the validation model.
VAL_INTERVAL = 200

# How often to save a model checkpoint
SAVE_INTERVAL = 2000

from prediction_model_sawyer import Prediction_Model

from PIL import Image

FLAGS = flags.FLAGS
flags.DEFINE_string('hyper', '', 'hyperparameters configuration file')
flags.DEFINE_string('visualize', '', 'model within hyperparameter folder from which to create gifs')
flags.DEFINE_integer('device', 0, 'the value for CUDA_VISIBLE_DEVICES variable')
flags.DEFINE_string('pretrained', None, 'path to model file from which to resume training')
flags.DEFINE_bool('diffmotions', False, 'visualize several different motions for a single scene')



## Helper functions
def peak_signal_to_noise_ratio(true, pred):
    """Image quality metric based on maximal signal power vs. power of the noise.

    Args:
      true: the ground truth image.
      pred: the predicted image.
    Returns:
      peak signal to noise ratio (PSNR)
    """
    return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
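For images scaled to [0, 1] the peak signal power is 1, so this reduces to 10 * log10(1 / MSE); an MSE of 0.01, for example, gives 20 dB.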
Example #30
    return report


def main(argv=None):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
    global BATCH_SIZE
    if FLAGS.batch_size != BATCH_SIZE:
        BATCH_SIZE = FLAGS.batch_size
    for attacker_name in ATTACKERS.keys():
        generate_CIFAR10_adv(attacker_name=attacker_name, nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
                             learning_rate=FLAGS.learning_rate,
                             clean_train=FLAGS.clean_train,
                             nb_filters=FLAGS.nb_filters, testing=False, args=FLAGS)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', NB_FILTERS,
                         'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', BATCH_SIZE,
                         'Size of training batches')
    flags.DEFINE_float('learning_rate', LEARNING_RATE,
                       'Learning rate for training')
    flags.DEFINE_integer('gpu', 0,
                         'GPU for training')
    flags.DEFINE_bool('clean_train', CLEAN_TRAIN, 'Train on clean examples')
    flags.DEFINE_string("resume", CIFAR_MODEL_STORE_PATH, 'store model path')
    tf.app.run()