Example #1
flags.DEFINE_integer('sup_seed', -1,
                     'Integer random seed used for labeled set selection.')

flags.DEFINE_integer('sup_per_batch', 10,
                     'Number of labeled samples per class per batch.')

flags.DEFINE_integer('unsup_batch_size', 1000,
                     'Number of unlabeled samples per batch.')

flags.DEFINE_integer('sup_batch_size', 1000,
                     'Number of labeled samples per batch.')

flags.DEFINE_integer('eval_interval', 500,
                     'Number of steps between evaluations.')

flags.DEFINE_float('learning_rate', 1e-3, 'Initial learning rate.')

flags.DEFINE_float('minimum_learning_rate', 3e-6, 'Final learning rate.')

flags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')

flags.DEFINE_float('decay_steps', 5000,
                   'Learning rate decay interval in steps.')

flags.DEFINE_float('visit_weight', 1.0, 'Weight for visit loss.')

flags.DEFINE_integer('max_steps', 20000, 'Number of training steps.')

flags.DEFINE_string('logdir', '/tmp/semisup/imagenet', 'Training log path.')

flags.DEFINE_integer('save_summaries_secs', 150,
                     'The frequency with which summaries are saved, in '
                     'seconds.')
Example #2
flags.DEFINE_string(
    'manual_checkpoint',
    default=None,
    help='instead of tacking on extra terms, use their exact path')
flags.DEFINE_string('logpath', default='./logs', help='log directory')
flags.DEFINE_bool('serve',
                  default=False,
                  help='export the model to allow for tensorflow serving')
flags.DEFINE_integer('num_files', default=None, help='')
flags.DEFINE_integer('shuffle_files', default=0, help='')
flags.DEFINE_integer('num_epochs', default=None, help='')
flags.DEFINE_list('filenames', default=None, help='')
flags.DEFINE_bool('notify', default=False, help='notify on end')
flags.DEFINE_bool('more_notify', default=False, help='notify on epoch')
flags.DEFINE_bool('plot_preds', default=True, help='plot pred plots')
flags.DEFINE_bool('random_noise', default=True, help='add random noise to output')
flags.DEFINE_float('maxval', default=0.1, help='max value of random noise')
flags.DEFINE_float('minval', default=0.0, help='min value of random noise')
flags.DEFINE_float('noise_std', default=0.02, help='std of random noise added to output')

# Architecture
flags.DEFINE_string('arch', default='vgg', help='')
flags.DEFINE_string('output', default='binned', help='')
flags.DEFINE_integer('coarse_bin', default=64, help='')
#flags.DEFINE_string('loss/output', default='vgg', help='')
flags.DEFINE_bool('coord_all', default=False, help='always use coord convs')
flags.DEFINE_bool('batch_norm', default=False, help='')
flags.DEFINE_bool('ssam', default=False, help='spatial soft-argmax')
flags.DEFINE_bool(
    'softmax',
    default=False,
    help='just softmax right before flattening (only when not using ssam)')
Example #3
from scipy.misc import imsave
from torchvision import transforms
import os
from itertools import product
from PIL import Image
import torch

flags.DEFINE_integer('batch_size', 256, 'Size of inputs')
flags.DEFINE_integer('data_workers', 4, 'Number of data-loading workers')
flags.DEFINE_string('logdir', 'cachedir', 'directory for logging')
flags.DEFINE_string('savedir', 'cachedir',
                    'location where log of experiments will be stored')
flags.DEFINE_integer(
    'num_filters', 64,
    'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')
flags.DEFINE_float('step_lr', 10.0, 'step size for gradient descent sampling')
flags.DEFINE_bool('cclass', True, 'whether to use conditional training in the model')
flags.DEFINE_bool('proj_cclass', False,
                  'use for backwards compatibility reasons')
flags.DEFINE_bool('spec_norm', True,
                  'Whether to use spectral normalization on weights')
flags.DEFINE_bool('use_bias', True, 'Whether to use bias in convolution')
flags.DEFINE_bool('use_attention', False,
                  'Whether to use self attention in network')
flags.DEFINE_integer('num_steps', 200, 'number of steps to optimize the label')
flags.DEFINE_string(
    'task', 'negation_figure',
    'conceptcombine, combination_figure, negation_figure, or_figure, negation_eval'
)

flags.DEFINE_bool('eval', False, 'Whether to quantitatively evaluate models')
Example #4
File: utils.py  Project: karimul/f-EBM
""" Utility functions. """
import numpy as np
import os
import random
import tensorflow as tf
import warnings

from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
from tensorflow.contrib.framework import sort

FLAGS = flags.FLAGS
flags.DEFINE_integer('spec_iter', 1,
                     'Number of iterations to normalize spectrum of matrix')
flags.DEFINE_float('spec_norm_val', 1.0, 'Desired norm of matrices')
flags.DEFINE_bool('downsample', False,
                  'Whether to do average pool downsampling')
flags.DEFINE_bool('spec_eval', False,
                  'Set to true to prevent spectral updates')
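
These flags suggest spectral normalization by power iteration. A minimal sketch of how such flags are typically consumed (my construction, assuming a weight matrix w of shape [in, out] and a persistent estimate u of shape [1, in]; names are illustrative):

def spectral_norm_sketch(w, u):
    # FLAGS.spec_iter rounds of power iteration to estimate the largest
    # singular value of w.
    for _ in range(FLAGS.spec_iter):
        v = tf.nn.l2_normalize(tf.matmul(u, w), axis=1)
        u = tf.nn.l2_normalize(tf.matmul(v, w, transpose_b=True), axis=1)
    if FLAGS.spec_eval:
        # Freeze the spectral estimates so evaluation does not update them.
        u = tf.stop_gradient(u)
        v = tf.stop_gradient(v)
    sigma = tf.squeeze(tf.matmul(tf.matmul(u, w), v, transpose_b=True))
    # Rescale so the matrix has spectral norm FLAGS.spec_norm_val.
    return w / sigma * FLAGS.spec_norm_val, u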


def get_median(v):
    """Return the median of v (the lower median when v has even length)."""
    v = tf.reshape(v, [-1])
    m = tf.shape(v)[0] // 2 + 1
    # tf.nn.top_k returns a (values, indices) pair; the m-th largest value
    # (index m - 1) is the median of the flattened tensor.
    return tf.nn.top_k(v, m).values[m - 1]
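
A quick usage sketch (illustrative, assuming TF1.x graph mode):

v = tf.constant([3., 1., 4., 1., 5.])
median = get_median(v)
with tf.Session() as sess:
    print(sess.run(median))  # 3.0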


def set_seed(seed):
    import torch
    import numpy
    import random
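    # The excerpt ends here; a typical completion for such a helper
    # (an assumption, not the project's verbatim code):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    numpy.random.seed(seed)
    random.seed(seed)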
Example #5
import sklearn
from sklearn import preprocessing

flags.DEFINE_string('wav_file', '',
                    'location of wav file to do prediction with')
flags.DEFINE_string(
    'model_dir', '',
    'path to saved checkpoint of trained model to use for prediction')
flags.DEFINE_integer(
    'max_time_slices', 1000,
    'maximum number of 10 ms time slices to process, i.e. the maximum '
    'audio file duration supported')

flags.DEFINE_string('preprocess_mode', 'mfcc',
                    'mfcc or fbank mode for preprocess')
flags.DEFINE_float('winlen', 0.02, 'specify the window length of feature')
flags.DEFINE_float('winstep', 0.01,
                   'specify the window step length of feature')
flags.DEFINE_integer('featlen', 13, 'Features length')

flags.DEFINE_string(
    'level', 'cha',
    'set the task level, phn, cha, or seq2seq, seq2seq will be supported soon')

flags.DEFINE_string('model', 'DBiRNN',
                    'set the model to use, DBiRNN, BiRNN, ResNet..')
flags.DEFINE_string('rnncell', 'lstm',
                    'set the rnncell to use, rnn, gru, lstm...')
flags.DEFINE_integer('num_layer', 2, 'set the layers for rnn')
flags.DEFINE_string('activation', 'tanh',
                    'set the activation to use, sigmoid, tanh, relu, elu...')
Example #6
flags.DEFINE_integer('sup_per_class', 3,
                     'Number of labeled samples used per class.')

flags.DEFINE_integer('sup_seed', -1,
                     'Integer random seed used for labeled set selection.')

flags.DEFINE_integer('sup_per_batch', 3,
                     'Number of labeled samples per class per batch.')

flags.DEFINE_integer('unsup_batch_size', 50,
                     'Number of unlabeled samples per batch.')

flags.DEFINE_integer('eval_interval', 500,
                     'Number of steps between evaluations.')

flags.DEFINE_float('learning_rate', 1e-3, 'Initial learning rate.')

flags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')

flags.DEFINE_float('decay_steps', 20000,
                   'Learning rate decay interval in steps.')

flags.DEFINE_float('visit_weight', 1, 'Weight for visit loss.')
flags.DEFINE_float('walker_weight', 1, 'Weight for walker loss.')
flags.DEFINE_float('logit_weight', 0.5, 'Weight for logit loss.')
flags.DEFINE_float('l1_weight', 0.001, 'Weight for embedding l1 regularization.')

flags.DEFINE_integer('max_steps', 7000, 'Number of training steps.')
flags.DEFINE_integer('warmup_steps', 3000, 'Number of warmup steps.')

flags.DEFINE_string('logdir', '/tmp/semisup_mnist', 'Training log path.')
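
The decay and warmup flags above typically drive a stepwise exponential schedule. A sketch of one common wiring (my construction, assuming TF1.x and a global step; the warmup handling is illustrative):

global_step = tf.train.get_or_create_global_step()
decayed_lr = tf.train.exponential_decay(
    FLAGS.learning_rate,  # initial rate
    global_step,
    FLAGS.decay_steps,    # decay interval in steps
    FLAGS.decay_factor,   # multiplier applied per interval
    staircase=True)
# Linear warmup over the first warmup_steps steps.
warmup = tf.minimum(1.0, tf.cast(global_step, tf.float32) / FLAGS.warmup_steps)
learning_rate = decayed_lr * warmup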
Example #7
flags.DEFINE_string('rnncell', 'lstm',
                    'set the rnncell to use, rnn, gru, lstm...')
flags.DEFINE_integer('num_layer', 2, 'set the layers for rnn')
flags.DEFINE_string('activation', 'tanh',
                    'set the activation to use, sigmoid, tanh, relu, elu...')
flags.DEFINE_string('optimizer', 'adam',
                    'set the optimizer to use, sgd, adam...')
flags.DEFINE_boolean('layerNormalization', False,
                     'set whether to apply layer normalization to rnn cell')

flags.DEFINE_integer('batch_size', 16, 'set the batch size')
flags.DEFINE_integer('num_hidden', 128, 'set the hidden size of rnn cell')
flags.DEFINE_integer('num_feature', 39, 'set the size of input feature')
flags.DEFINE_integer('num_classes', 30, 'set the number of output classes')
flags.DEFINE_integer('num_epochs', 500, 'set the number of epochs')
flags.DEFINE_integer('num_iter', 3, 'set the number of iterations in routing')
flags.DEFINE_float('lr', 0.0001, 'set the learning rate')
flags.DEFINE_float('dropout_prob', 0.1, 'set probability of dropout')
flags.DEFINE_float(
    'grad_clip', 1,
    'set the threshold of gradient clipping, -1 denotes no clipping')
#flags.DEFINE_string('datadir', '/home/pony/github/data/timit', 'set the data root directory')

#flags.DEFINE_string('datadir', "C:\Research\Corpus\InputFiles\TIMIT_13", 'set the data root directory')
#flags.DEFINE_string('logdir', "C:\Research\Corpus\InputFiles\TIMIT_13\log", 'set the log directory')
flags.DEFINE_string('datadir', "/home/kli/Corpus/InputFiles/TIMIT_13",
                    'set the data root directory')
flags.DEFINE_string('logdir', "/home/kli/Corpus/InputFiles/TIMIT_13/log",
                    'set the log directory')

FLAGS = flags.FLAGS

Example #8
def main(argv=None):
    baseline_deepfool(nb_epochs=FLAGS.nb_epochs,
                      batch_size=FLAGS.batch_size,
                      learning_rate=FLAGS.learning_rate,
                      clean_train=FLAGS.clean_train,
                      backprop_through_attack=FLAGS.backprop_through_attack,
                      nb_filters=FLAGS.nb_filters,
                      train_start=FLAGS.train_start,
                      train_end=FLAGS.train_end,
                      test_start=FLAGS.test_start,
                      test_end=FLAGS.test_end)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', 64, 'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_bool('clean_train', True, 'Train on clean examples')
    flags.DEFINE_bool('backprop_through_attack', False,
                      ('If True, backprop through adversarial example '
                       'construction process during adversarial training'))
    flags.DEFINE_integer('train_start', 1000,
                         'start of MNIST training samples')
    flags.DEFINE_integer('train_end', 1500, 'end of MNIST training samples')
    flags.DEFINE_integer('test_start', 0, 'start of MNIST test samples')
    flags.DEFINE_integer('test_end', 50, 'end of MNIST test samples')

    tf.app.run()
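
As in the other entry points in this collection, the flags are registered under the __main__ guard and tf.app.run() parses the command line into FLAGS before dispatching to main(). A hypothetical invocation overriding two of the defaults:

# python <script>.py --nb_epochs=12 --clean_train=False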
Example #9
import os
from tensorflow.python.platform import flags
from trainer.meta_LST import MetaTrainer

FLAGS = flags.FLAGS
### Basic Options (the same as MTL)
flags.DEFINE_integer('way_num', 5,
                     'number of classes (e.g. 5-way classification)')
flags.DEFINE_integer('shot_num', 1,
                     'number of examples per class (K for K-shot learning)')
flags.DEFINE_integer('img_size', 84, 'image size')
flags.DEFINE_integer('device_id', 1, 'GPU device ID to run the job.')
flags.DEFINE_float('gpu_rate', 0.9,
                   'the parameter for the full_gpu_memory_mode')
flags.DEFINE_string('phase', 'meta', 'pre or meta')
flags.DEFINE_string('exp_log_label', 'weights_saving_dir',
                    'directory for summaries and checkpoints')
flags.DEFINE_string('logdir_base', './', 'directory for logs')
flags.DEFINE_string(
    'dataset', 'miniImagenet',
    'dataset used in the experiment (miniImagenet or tieredImagenet)')
flags.DEFINE_string('data_path', './', 'directory for dataset')
flags.DEFINE_bool('full_gpu_memory_mode', False,
                  'in this mode, the code occupies GPU memory in advance')
flags.DEFINE_string('exp_name', 'finetune_mini_RN', 'name for the experiment')

### Basic Options used in our experiments
flags.DEFINE_integer(
    'nb_ul_samples', 10,
    'number of unlabeled examples per class')
Example #10
flags.DEFINE_bool('train', True, 'true to train, false to test')
flags.DEFINE_string('net_arch', 'vgg19', 'embedding network architecture')
flags.DEFINE_bool(
    'double_stream_mode', True,
    'true to use double stream framework, false to use single stream framework'
)
flags.DEFINE_bool(
    'finetune_mode', True,
    'true to finetune from pre-trained model, false to train from scratch')
flags.DEFINE_integer('epoch_num', 10, 'number of epochs')
flags.DEFINE_integer('batch_size', 10,
                     'number of samples trained in a single batch')
flags.DEFINE_integer('img_resize', 224,
                     'resize images to a specific resolution')
flags.DEFINE_integer('cls_num', 16, '5 for relation, 16 for domain')
flags.DEFINE_float('learning_rate', 0.00001, 'the learning rate')
flags.DEFINE_bool('shuffle_dataset', True,
                  'shuffle the dataset before training or not')
flags.DEFINE_bool('load_ckpt', False, 'load checkpoint or not')
flags.DEFINE_string('log_label', 'experiment_01', 'the label for ckpt saving')
flags.DEFINE_string('img_list1',
                    './data_label_splits/example/single_body1_train_16.txt',
                    'the directory for the first image list')
flags.DEFINE_string('img_list2',
                    './data_label_splits/example/single_body2_train_16.txt',
                    'the directory for the second image list')
flags.DEFINE_string('pretrain_model_dir', './pre_model.npy',
                    'the directory for the pre-trained model')


def main():
Example #11
    # model-specific config
    flags.DEFINE_string(
        "model_name", "LSTMModel",
        "Which architecture to use for the model. Models are defined "
        "in models package")
    flags.DEFINE_string("model_output_dir", "./model_output_dir/",
                        "The directory to save the model files in.")
    flags.DEFINE_string("feature_names", "audio_embedding", "Name of the feature "
                                                            "to use for training.")

    # Other unused (for now) flags..
    flags.DEFINE_string("label_loss", "CrossEntropyLoss",
                        "Which loss function to use for training the model.")
    flags.DEFINE_float("regularization_penalty", 1.0,
                       "How much weight to give to the regularization loss (the label loss has "
                       "a weight of 1).")
    flags.DEFINE_float("base_learning_rate", 0.01,
                       "Which learning rate to start with.")
    flags.DEFINE_float("learning_rate_decay", 0.95,
                       "Learning rate decay factor to be applied every "
                       "learning_rate_decay_examples.")
    flags.DEFINE_float("learning_rate_decay_examples", 4000000,
                       "Multiply current learning rate by learning_rate_decay "
                       "every learning_rate_decay_examples.")
    # flags.DEFINE_integer("export_model_steps", 1000,
    #                      "The period, in number of steps, with which the model "
    #                      "is exported for batch prediction.")
    flags.DEFINE_string("optimizer", "AdamOptimizer",
                        "What optimizer class to use.")
    flags.DEFINE_float("clip_gradient_norm", 1.0, "Norm to clip gradients to.")
Example #12
File: train.py  Project: bbeatrix/ebm_cl
flags.DEFINE_integer('data_workers', 4,
                     'Number of different data workers to load data in parallel')

# General Experiment Settings
flags.DEFINE_string('logdir', 'cachedir',
                    'location where log of experiments will be stored')
flags.DEFINE_string('exp', 'default', 'name of experiments')
flags.DEFINE_integer('log_interval', 10, 'log outputs every so many batches')
flags.DEFINE_integer('save_interval', 1000,
                     'save outputs every so many batches')
flags.DEFINE_integer('test_interval', 1000,
                     'evaluate outputs every so many batches')
flags.DEFINE_integer('resume_iter', -1, 'iteration to resume training from')
flags.DEFINE_bool('train', True, 'whether to train or test')
flags.DEFINE_bool('evaluate', True, 'whether to eval')
flags.DEFINE_integer('epoch_num', 10000, 'Number of Epochs to train on')
flags.DEFINE_float('lr', 3e-4, 'Learning for training')
flags.DEFINE_integer('num_gpus', 1, 'number of gpus to train on')

# EBM Specific Experiments Settings
flags.DEFINE_float('ml_coeff', 1.0, 'Maximum Likelihood Coefficients')
flags.DEFINE_float('l2_coeff', 1.0, 'L2 Penalty training')
flags.DEFINE_bool('cclass', False, 'Whether to do conditional training in models')
flags.DEFINE_bool('model_cclass', False,
                  'use unsupervised clustering to infer fake labels')
flags.DEFINE_integer('temperature', 1, 'Temperature for energy function')
flags.DEFINE_string(
    'objective', 'cd',
    'use either contrastive divergence objective (least stable), '
    'logsumexp (more stable), '
    'or softplus (most stable)')
flags.DEFINE_bool('zero_kl', False, 'whether to zero out the kl loss')
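
The three objective choices correspond to standard energy-based-model losses. A rough sketch of the distinction, assuming e_pos and e_neg hold energies of data samples and of negatives (illustrative, not the repository's exact loss code):

if FLAGS.objective == 'cd':
    # Contrastive divergence: lower energy on data, raise it on samples.
    loss_ml = tf.reduce_mean(e_pos) - tf.reduce_mean(e_neg)
elif FLAGS.objective == 'logsumexp':
    # Reweight negatives by a softmax over their negated energies.
    w = tf.stop_gradient(tf.nn.softmax(-e_neg, axis=0))
    loss_ml = tf.reduce_mean(e_pos) - tf.reduce_sum(w * e_neg)
elif FLAGS.objective == 'softplus':
    # Softplus margin, typically the most numerically stable of the three.
    loss_ml = tf.reduce_mean(tf.nn.softplus(e_pos - e_neg))
loss = FLAGS.ml_coeff * loss_ml + FLAGS.l2_coeff * (
    tf.reduce_mean(tf.square(e_pos)) + tf.reduce_mean(tf.square(e_neg)))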
Example #13
          format(mutated_accuracy))

    # if mutated_accuracy >= threshold * accuracy:
    #     train_dir = os.path.join(path.mu_model_path, 'ns', dataset + '_' + model_name, '0')
    #     if not os.path.exists(train_dir):
    #         os.makedirs(train_dir)
    #     save_path = os.path.join(train_dir, datasets + '_' + model_name + '.model')
    #     saver = tf.train.Saver()
    #     saver.save(sess, save_path)

    sess.close()


def main(argv=None):
    ns(dataset=FLAGS.dataset,
       sens_param=FLAGS.sens_param,
       ration=FLAGS.ration,
       threshold=FLAGS.threshold)


if __name__ == '__main__':
    flags.DEFINE_string('dataset', 'census', 'The target datasets.')
    flags.DEFINE_integer(
        'sens_param', 9,
        'sensitive index, index start from 1, 9 for gender, 8 for race.')
    flags.DEFINE_float('ration', 0.1, 'The ratio of mutated neurons.')
    flags.DEFINE_float('threshold', 0.9,
                       'The threshold of accuracy compared with the original.')

    tf.app.run()
Example #14
flags.DEFINE_boolean(
    'select', True,
    'Select correctly classified examples for the experiment.')
flags.DEFINE_integer('nb_examples', 100,
                     'The number of examples selected for attacks.')
flags.DEFINE_boolean('balance_sampling', False,
                     'Select the same number of examples for each class.')
flags.DEFINE_boolean('test_mode', False,
                     'Only select one sample for each class.')

flags.DEFINE_string(
    'attacks',
    "FGSM?eps=0.1;BIM?eps=0.1&eps_iter=0.02;JSMA?targeted=next;CarliniL2?targeted=next&batch_size=100&max_iterations=1000;CarliniL2?targeted=next&batch_size=100&max_iterations=1000&confidence=2",
    'Attack name and parameters in URL style, separated by semicolon.')
flags.DEFINE_float('clip', -1,
                   'L-infinity clip on the adversarial perturbations.')
flags.DEFINE_boolean(
    'visualize', True,
    'Output the image examples for each attack, enabled by default.')

flags.DEFINE_string('robustness', '', 'Supported: FeatureSqueezing.')

flags.DEFINE_string('detection', '', 'Supported: feature_squeezing.')
flags.DEFINE_boolean('detection_train_test_mode', True,
                     'Split into train/test datasets.')

flags.DEFINE_string('result_folder', "results",
                    'The output folder for results.')
flags.DEFINE_boolean(
    'verbose', False,
    'Stdout level. The hidden content will be saved to log files anyway.')
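
The attacks flag packs each attack and its parameters in URL-query style. A minimal parser sketch for that format (my construction; the helper name is hypothetical):

from urllib.parse import parse_qsl

def parse_attack_spec(spec):
    # 'FGSM?eps=0.1;BIM?eps=0.1&eps_iter=0.02' -> [(name, params), ...]
    attacks = []
    for entry in spec.split(';'):
        name, _, query = entry.partition('?')
        attacks.append((name, dict(parse_qsl(query))))  # values stay strings
    return attacks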
Example #15
flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes')
flags.DEFINE_bool('dpos_only', False,
                  'fix all factors except for positions of shapes')
flags.DEFINE_bool('dsize_only', False,
                  'fix all factors except for size of objects')
flags.DEFINE_bool('drot_only', False,
                  'fix all factors except for rotation of objects')
flags.DEFINE_bool('dsprites_restrict', False,
                  'fix all factors except for rotation of objects')
flags.DEFINE_string('imagenet_path', '/root/imagenet',
                    'path to imagenet images')

# Data augmentation options
flags.DEFINE_bool('cutout_inside', False,
                  'whether the cutout mask must lie fully inside the image')
flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout')
flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout')
flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data')


def cutout(mask_color=(0, 0, 0)):
    mask_size_half = FLAGS.cutout_mask_size // 2
    offset = 1 if FLAGS.cutout_mask_size % 2 == 0 else 0

    def _cutout(image):
        image = np.asarray(image).copy()

        if np.random.random() > FLAGS.cutout_prob:
            return image

        h, w = image.shape[:2]
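
        # The excerpt ends above; a typical completion of a cutout under
        # these flags (an assumption, not the project's verbatim code):
        if FLAGS.cutout_inside:
            # Keep the whole mask inside the image.
            cx = np.random.randint(mask_size_half, w + offset - mask_size_half)
            cy = np.random.randint(mask_size_half, h + offset - mask_size_half)
        else:
            cx = np.random.randint(0, w + offset)
            cy = np.random.randint(0, h + offset)

        # Clip the mask rectangle to the image and fill it with mask_color.
        xmin = max(0, cx - mask_size_half)
        ymin = max(0, cy - mask_size_half)
        xmax = min(w, xmin + FLAGS.cutout_mask_size)
        ymax = min(h, ymin + FLAGS.cutout_mask_size)
        image[ymin:ymax, xmin:xmax] = mask_color
        return image

    return _cutout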
Example #16
def define():
    """Define common flags."""
    # yapf: disable
    flags.DEFINE_integer('batch_size', 32,
                         'Batch size.')

    flags.DEFINE_integer('crop_width', None,
                         'Width of the central crop for images.')

    flags.DEFINE_integer('crop_height', None,
                         'Height of the central crop for images.')

    flags.DEFINE_string('train_log_dir', '/tmp/attention_ocr/train',
                        'Directory where to write event logs.')

    flags.DEFINE_string('dataset_name', 'fsns',
                        'Name of the dataset. Supported: fsns')

    flags.DEFINE_string('split_name', 'train',
                        'Dataset split name to run evaluation for: test,train.')

    flags.DEFINE_string('dataset_dir', None,
                        'Dataset root folder.')

    flags.DEFINE_string('checkpoint', '',
                        'Path for checkpoint to restore weights from.')

    flags.DEFINE_string('master',
                        '',
                        'BNS name of the TensorFlow master to use.')

    # Model hyper parameters
    flags.DEFINE_float('learning_rate', 0.004,
                       'learning rate')

    flags.DEFINE_string('optimizer', 'momentum',
                        'the optimizer to use')

    flags.DEFINE_float('momentum', 0.9,
                       'momentum value for the momentum optimizer if used')

    flags.DEFINE_bool('use_augment_input', True,
                      'If True will use image augmentation')

    # Method hyper parameters
    # conv_tower_fn
    flags.DEFINE_string('final_endpoint', 'Mixed_5d',
                        'Endpoint to cut inception tower')

    # sequence_logit_fn
    flags.DEFINE_bool('use_attention', True,
                      'If True will use the attention mechanism')

    flags.DEFINE_bool('use_autoregression', True,
                      'If True will use autoregression (a feedback link)')

    flags.DEFINE_integer('num_lstm_units', 256,
                         'number of LSTM units for sequence LSTM')

    flags.DEFINE_float('weight_decay', 0.00004,
                       'weight decay for char prediction FC layers')

    flags.DEFINE_float('lstm_state_clip_value', 10.0,
                       'cell state is clipped by this value prior to the cell'
                       ' output activation')

    # 'sequence_loss_fn'
    flags.DEFINE_float('label_smoothing', 0.1,
                       'weight for label smoothing')

    flags.DEFINE_bool('ignore_nulls', True,
                      'ignore null characters for computing the loss')

    flags.DEFINE_bool('average_across_timesteps', False,
                      'divide the returned cost by the total label weight')
Example #17
  if viz_enabled:
    import matplotlib.pyplot as plt
    plt.close(figure)
    _ = grid_visual_mnist(grid_viz_data, jsma_params['gamma'])
  return report


def main(argv=None):
  from cleverhans_tutorials import check_installation
  check_installation(__file__)

  mnist_tutorial_jsma(viz_enabled=FLAGS.viz_enabled,
                      nb_epochs=FLAGS.nb_epochs,
                      batch_size=FLAGS.batch_size,
                      source_samples=FLAGS.source_samples,
                      learning_rate=FLAGS.learning_rate)


if __name__ == '__main__':
  flags.DEFINE_boolean('viz_enabled', VIZ_ENABLED,
                       'Visualize adversarial ex.')
  flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                       'Number of epochs to train model')
  flags.DEFINE_integer('batch_size', BATCH_SIZE, 'Size of training batches')
  flags.DEFINE_integer('source_samples', SOURCE_SAMPLES,
                       'Nb of test inputs to attack')
  flags.DEFINE_float('learning_rate', LEARNING_RATE,
                     'Learning rate for training')

  tf.app.run()
Example #18
                    'Which network architecture to use')
flags.DEFINE_integer('n_input', 33, 'Number of nodes in the input layer')
flags.DEFINE_integer('n_hidden_1', 40,
                     'Number of nodes in the first auto encoder hidden layer')
flags.DEFINE_integer(
    'n_hidden_2', 30,
    'Number of nodes in the second auto encoder hidden layer')
flags.DEFINE_integer('n_hidden_3', 50,
                     'Number of nodes in the fully connected layer')
flags.DEFINE_integer('n_output', 2, 'Number of nodes in the output layer')
flags.DEFINE_integer('n_epoch_ae', 10,
                     'Max Number of epochs to train autoencoder')
flags.DEFINE_integer('n_epoch_all', 10,
                     'Max Number of epochs to train all graph')
flags.DEFINE_integer('batch_size', 256, 'Number of samples per batch')
flags.DEFINE_float('learning_rate', 0.0002, 'Learning rate')
flags.DEFINE_integer('patience', 10, 'Patience count for early stopping')

f = open('mom.pickle', 'rb')
data_pickle = pickle.load(f)
mom_data = data_pickle['data']
mom_label = data_pickle['label']

train_data = tf.data.Dataset.from_tensor_slices(
    mom_data[:int(len(mom_data) * 0.8)])
train_label = tf.data.Dataset.from_tensor_slices(
    mom_label[:int(len(mom_label) *
                   0.8)]).map(lambda z: tf.one_hot(tf.cast(z, tf.int32), 2))
train_dataset = tf.data.Dataset.zip(
    (train_data, train_label)).shuffle(10000).repeat().batch(FLAGS.batch_size)
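
A sketch of consuming such a pipeline under TF1.x (my construction):

iterator = train_dataset.make_one_shot_iterator()
batch_data, batch_label = iterator.get_next()
with tf.Session() as sess:
    x, y = sess.run([batch_data, batch_label])  # one batch of inputs and one-hot labels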
Example #19
# Dataset Specifications:
flags.DEFINE_string(
    'img_dir',
    '/Users/subir/Codes/DeepLearning/DeepLearning-Playground/ImageClassification/Snakes in '
    'Hood/dataset/train/',
    'Directory where all the training images are irrespective of classes')
flags.DEFINE_string(
    'dataframe',
    '/Users/subir/Codes/DeepLearning/DeepLearning-Playground/ImageClassification/Snakes '
    'in Hood/dataset/train.csv',
    'CSV file containing image names and filenames')

flags.DEFINE_integer('img_width', 299, 'Training Image Width')
flags.DEFINE_integer('img_height', 299, 'Training Image Height')
flags.DEFINE_integer('batch_size', 32, 'Batch Size for Minibatches')
flags.DEFINE_float('validation_split', 0.1, 'Train-Test Validation Split')
flags.DEFINE_bool('stratify', True, 'Stratify training set')

# Model Specifications:
flags.DEFINE_string('model', 'EfficentNetB7', 'Backbone Feature Training')
flags.DEFINE_integer('no_of_classes', 35, 'Number of Output Classes')
flags.DEFINE_string('optimizer', 'adam', 'Optimizer in training routine')

# Runtime Specifications:
flags.DEFINE_integer('epochs', 2, 'Number of Epochs to train')
flags.DEFINE_integer('verbose', 1, 'Verbosity at Training Time')
flags.DEFINE_bool('model_summary', True, 'Print Model Summary')
flags.DEFINE_bool('finetune', False, 'True: transfer learning, False: train from scratch')

# Tensorboard Parameters
flags.DEFINE_bool('enable_log', True, 'Whether to log the training process')
Example #20
flags.DEFINE_integer('save_summaries_secs', 60,
                     'The frequency with which summaries are saved, in '
                     'seconds.')

flags.DEFINE_integer('save_interval_secs', 600,
                     'Frequency in seconds of saving the model.')

flags.DEFINE_integer('max_number_of_steps', int(1e10),
                     'The maximum number of gradient steps.')

flags.DEFINE_string('checkpoint_inception', '',
                    'Checkpoint to recover inception weights from.')

flags.DEFINE_float('clip_gradient_norm', 2.0,
                   'If greater than 0 then the gradients would be clipped by '
                   'it.')

flags.DEFINE_bool('sync_replicas', False,
                  'If True will synchronize replicas during training.')

flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of gradients updates before updating params.')

flags.DEFINE_integer('total_num_replicas', 1,
                     'Total number of worker replicas.')

flags.DEFINE_integer('startup_delay_steps', 15,
                     'Number of training steps between replicas startup.')

flags.DEFINE_boolean('reset_train_dir', False,
Example #21

def main(argv=None):
    mnist_tutorial(nb_epochs=FLAGS.nb_epochs,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   clean_train=FLAGS.clean_train,
                   backprop_through_attack=FLAGS.backprop_through_attack,
                   nb_filters=FLAGS.nb_filters)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_filters', 32, 'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', 12, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 1024, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_bool('clean_train', True, 'Train on clean examples')
    flags.DEFINE_bool('backprop_through_attack', False,
                      ('If True, backprop through adversarial example '
                       'construction process during adversarial training'))
    flags.DEFINE_integer('retrain_epoch', 2,
                         'Number of retrain before next pruning')
    flags.DEFINE_float('fgsm_eps', 0.3, 'eps for fgsm')
    flags.DEFINE_bool(
        'use_inhibition_original', False,
        'true if you want to use original inhibition method. False if you want to use my modified version'
    )
    flags.DEFINE_integer('prune_iterations', 20,
                         'number of iteration for iterative pruning.')
    flags.DEFINE_float('retrain_lr', 1e-3, 'lr for retraining')
    flags.DEFINE_float('prune_factor', 10,
Example #22
File: main.py  Project: RSIP4SH/MAML-2
import random
import tensorflow as tf
from tensorflow.python.platform import flags
import pickle
import csv
from sin_wave_generation import sinGen
from MAML import MAML

Flags = flags.FLAGS

flags.DEFINE_string('datasource', 'sinusoid', 'sinusoid')
flags.DEFINE_integer('metatrain_iterations', 15000,
                     'number of metatraining iterations.')
flags.DEFINE_integer('meta_batch_size', 25,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'update_batch_size', 10,
    'number of examples used for inner gradient update (K for K-shot learning).'
)
flags.DEFINE_float('update_lr', 1e-3,
                   'step size alpha for inner gradient update.')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_bool('test', True, 'True to test after training, False to skip testing.')
flags.DEFINE_integer('testNum', 1000, 'number of inner gradient update')
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient update')
flags.DEFINE_string('norm', 'None', 'batch_norm, layer_norm, or None')
flags.DEFINE_string("summaries_dir", "tensorBroadLog", "TensorBroad location")
flags.DEFINE_string("logdir", "model", "Model location")

flags.DEFINE_bool("resume_model", True, "resume previous training model")
Example #23
from tensorflow.python.platform import flags

FLAGS = flags.FLAGS

## Dataset/method options
flags.DEFINE_integer(
    'n_way', 5,
    'number of classes used in classification (e.g. 5-way classification).')

## Training options
flags.DEFINE_integer('meta_train_iterations', 15000,
                     'number of meta-training iterations.')
# batch size during each step of meta-update (testing, validation, training)
flags.DEFINE_integer('meta_batch_size', 25,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'k_shot', 1,
    'number of examples used for inner gradient update (K for K-shot learning).'
)
flags.DEFINE_float('inner_update_lr', 0.4,
                   'step size alpha for inner gradient update.')
flags.DEFINE_integer('num_inner_updates', 1,
                     'number of inner gradient updates during meta-training.')
flags.DEFINE_integer('num_filters', 16, 'number of filters for conv nets.')
flags.DEFINE_bool('learn_inner_update_lr', False,
                  'learn the per-layer update learning rate.')

## Logging, saving, and testing options
flags.DEFINE_string('data_path', './omniglot_resized', 'path to the dataset.')
flags.DEFINE_bool('log', True,
Example #24
flags.DEFINE_bool('use_noisy_demos', False, 'use noisy demonstrations or not (for domain shift)')
flags.DEFINE_string('noisy_demo_gif_dir', None, 'path to the videos of noisy demonstrations')
flags.DEFINE_string('noisy_demo_file', None, 'path to the directory where noisy demo files that containing robot states and actions are stored')
flags.DEFINE_bool('no_action', False, 'do not include actions in the demonstrations for inner update')
flags.DEFINE_bool('no_state', False, 'do not include states in the demonstrations during training')
flags.DEFINE_bool('no_final_eept', False, 'do not include final ee pos in the demonstrations for inner update')
flags.DEFINE_bool('zero_state', False, 'zero-out states (meta-learn state) in the demonstrations for inner update')
flags.DEFINE_bool('two_arms', False, 'use two-arm structure when state is zeroed-out')
flags.DEFINE_integer('training_set_size', 693, 'size of the training set, 1500 for sim_reach, 693 for sim push, and \
                                                -1 for all data except those in validation set')
flags.DEFINE_integer('val_set_size', 76, 'size of the validation set, 150 for sim_reach and 76 for sim push')

## Training options
flags.DEFINE_integer('metatrain_iterations', 30000, 'number of metatraining iterations.') # 30k for pushing, 50k for reaching and placing
flags.DEFINE_integer('meta_batch_size', 15, 'number of tasks sampled per meta-update') # 5 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 1, 'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('train_update_lr', .01, 'step size alpha for inner gradient update.') # 0.001 for reaching, 0.01 for pushing and placing
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.') # 5 for placing
flags.DEFINE_bool('clip', False, 'use gradient clipping for fast gradient')
flags.DEFINE_float('clip_max', 10.0, 'maximum clipping value for fast gradient')
flags.DEFINE_float('clip_min', -10.0, 'minimum clipping value for fast gradient')
flags.DEFINE_bool('fc_bt', True, 'use bias transformation for the first fc layer')
flags.DEFINE_bool('all_fc_bt', False, 'use bias transformation for all fc layers')
flags.DEFINE_bool('conv_bt', True, 'use bias transformation for the first conv layer, N/A for using pretraining')
flags.DEFINE_integer('bt_dim', 10, 'the dimension of bias transformation for FC layers')
flags.DEFINE_string('pretrain_weight_path', 'N/A', 'path to pretrained weights') # pretrained using MIL to be fine-tuned with gradient step
flags.DEFINE_bool('train_pretrain_conv1', False, 'whether to finetune the pretrained weights')
flags.DEFINE_bool('two_head', False, 'use two-head architecture')
flags.DEFINE_bool('learn_final_eept', False, 'learn an auxiliary loss for predicting final end-effector pose')
flags.DEFINE_bool('learn_final_eept_whole_traj', False, 'learn an auxiliary loss for predicting final end-effector pose \
Example #25
def main(argv=None):
    mnist_blackbox(nb_classes=FLAGS.nb_classes,
                   batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   nb_epochs=FLAGS.nb_epochs,
                   holdout=FLAGS.holdout,
                   data_aug=FLAGS.data_aug,
                   nb_epochs_s=FLAGS.nb_epochs_s,
                   lmbda=FLAGS.lmbda,
                   aug_batch_size=FLAGS.data_aug_batch_size)


if __name__ == '__main__':
    # General flags
    flags.DEFINE_integer('nb_classes', 10, 'Number of classes in problem')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')

    # Flags related to oracle
    flags.DEFINE_integer('nb_epochs', 10, 'Number of epochs to train model')

    # Flags related to substitute
    flags.DEFINE_integer('holdout', 150, 'Test set holdout for adversary')
    flags.DEFINE_integer('data_aug', 6, 'Nb of substitute data augmentations')
    flags.DEFINE_integer('nb_epochs_s', 10, 'Training epochs for substitute')
    flags.DEFINE_float('lmbda', 0.1, 'Lambda from arxiv.org/abs/1602.02697')
    flags.DEFINE_integer('data_aug_batch_size', 512,
                         'Batch size for augmentation')

    tf.app.run()
Example #26
flags.DEFINE_integer('unsup_samples', -1,
                     'Number of unlabeled samples used in total. -1 = all.')

flags.DEFINE_integer('sup_seed', -1,
                     'Integer random seed used for labeled set selection.')

flags.DEFINE_integer('sup_per_batch', 10,
                     'Number of labeled samples per class per batch.')

flags.DEFINE_integer('unsup_batch_size', 100,
                     'Number of unlabeled samples per batch.')

flags.DEFINE_integer('emb_size', 128,
                     'Size of the embeddings to learn.')

flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')

flags.DEFINE_float('minimum_learning_rate', 1e-6,
                   'Lower bound for learning rate.')

flags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')

flags.DEFINE_float('decay_steps', 60000,
                   'Learning rate decay interval in steps.')

flags.DEFINE_float('visit_weight', 0.0, 'Weight for visit loss.')

flags.DEFINE_string('visit_weight_envelope', None,
                    'Increase visit weight with an envelope: [None, sigmoid, linear]')

flags.DEFINE_integer('visit_weight_envelope_steps', -1,
Example #27
flags.DEFINE_string('pretrained_model', '',
                    'filepath of a pretrained model to initialize from.')

flags.DEFINE_integer('sequence_length', 3,
                     'sequence length, including context frames.')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_integer('use_action', 1,
                     'Whether or not to give the action to the model')

flags.DEFINE_string('model', 'CDNA',
                    'model architecture to use - CDNA, DNA, or STP')

flags.DEFINE_integer('num_masks', 10,
                     'number of masks, usually 1 for DNA, 10 for CDNA, STN.')
flags.DEFINE_float('schedsamp_k', 900.0,
                   'The k hyperparameter for scheduled sampling, '
                   '-1 for no scheduled sampling.')
flags.DEFINE_float('train_val_split', 0.95,
                   'The percentage of files to use for the training set, '
                   'vs. the validation set.')

flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
                   'the base learning rate of the generator')
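
schedsamp_k is the k of inverse-sigmoid scheduled sampling (Bengio et al., 2015). A sketch of the usual schedule (my construction; the helper name is hypothetical):

import numpy as np

def num_ground_truth(step, batch_size):
    # The probability of feeding ground-truth frames decays as
    # k / (k + exp(step / k)); schedsamp_k == -1 disables it.
    if FLAGS.schedsamp_k == -1:
        return 0
    prob = FLAGS.schedsamp_k / (FLAGS.schedsamp_k + np.exp(step / FLAGS.schedsamp_k))
    return int(round(batch_size * prob))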


## Helper functions
def peak_signal_to_noise_ratio(true, pred):
    """Image quality metric based on maximal signal power vs. power of the noise.

    Args:
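      true: the ground truth image.
      pred: the predicted image.

    Returns:
      peak signal to noise ratio (PSNR)
    """
    # A typical body (a sketch, assuming images scaled to [0, 1]):
    # PSNR = 10 * log10(1 / MSE).
    mse = tf.reduce_mean(tf.square(true - pred))
    return 10.0 * tf.log(1.0 / mse) / tf.log(10.0)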
Example #28

FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 125, 'Size of training batches')
flags.DEFINE_string('dataset', 'cifar10', 'dataset: cifar10/100 or svhn')
flags.DEFINE_string('attack', 'deepfool', 'adversarial attack: deepfool, jsma, cw, cw_nnif')
flags.DEFINE_string('characteristics', 'nnif', 'type of defence: lid/mahalanobis/dknn/nnif')
flags.DEFINE_bool('with_noise', False, 'whether or not to include noisy samples')
flags.DEFINE_bool('only_last', False, 'Using just the last layer, the embedding vector')
flags.DEFINE_string('checkpoint_dir', '', 'Checkpoint dir, the path to the saved model architecture and weights')

# FOR DkNN and LID
flags.DEFINE_integer('k_nearest', -1, 'number of nearest neighbors to use for LID/DkNN detection')

# FOR MAHALANOBIS
flags.DEFINE_float('magnitude', -1, 'magnitude for mahalanobis detection')

# FOR NNIF
flags.DEFINE_integer('max_indices', -1, 'maximum number of helpful indices to use in NNIF detection')
flags.DEFINE_string('ablation', '1111', 'for ablation test')

#TODO: remove when done debugging
flags.DEFINE_string('mode', 'null', 'to bypass pycharm bug')
flags.DEFINE_string('port', 'null', 'to bypass pycharm bug')

assert FLAGS.with_noise is False  # TODO(support noise in the future)
rgb_scale = 1.0  # Used for the Mahalanobis detection

if FLAGS.set == 'val':
    test_val_set = True  # evaluating on the validation set
    WORKSPACE = 'influence_workspace_validation'
Example #29
flags.DEFINE_string('datasource', 'sinusoid',
                    'sinusoid or omniglot or miniimagenet')
flags.DEFINE_integer(
    'num_classes', 5,
    'number of classes used in classification (e.g. 5-way classification).')
# oracle means task id is input (only suitable for sinusoid)
flags.DEFINE_string('baseline', None, 'oracle, or None')

## Training options
flags.DEFINE_integer('pretrain_iterations', 0,
                     'number of pre-training iterations.')
flags.DEFINE_integer(
    'metatrain_iterations', 15000,
    'number of metatraining iterations.')  # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 25,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer(
    'update_batch_size', 5,
    'number of examples used for inner gradient update (K for K-shot learning).'
)
flags.DEFINE_float(
    'update_lr', 1e-3,
    'step size alpha for inner gradient update.')  # 0.1 for omniglot
flags.DEFINE_integer('num_updates', 1,
                     'number of inner gradient updates during training.')

## Model options
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_integer(
    'num_filters', 64,
    'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')
Example #30
import tensorflow as tf
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile

import moviepy.editor as mpy

from PIL import Image

FLAGS = flags.FLAGS

flags.DEFINE_integer('sequence_length', 15, 'sequence length, including context frames.')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_integer('use_state', 1, 'Whether or not to give the state+action to the model')
flags.DEFINE_integer('batch_size', 8, 'batch size for training')
flags.DEFINE_float('train_val_split', 1,
                   'The percentage of files to use for the training set,'
                   ' vs. the validation set.')

# Original image dimensions
ORIGINAL_WIDTH = 128
ORIGINAL_HEIGHT = 128
COLOR_CHAN = 3

# Default image dimensions.
IMG_WIDTH = 128
IMG_HEIGHT = 128

# Dimension of the state and action.
STATE_DIM = 4
ACTION_DIM = 2