import sys

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import tensorboard_logger as tb_logger
from tensorflow.python.platform import flags

from util import adjust_learning_rate, AverageMeter, accuracy

torch.manual_seed(1)

FLAGS = flags.FLAGS
flags.DEFINE_bool('places_full', False, 'use all of places')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate')
flags.DEFINE_list('lr_decay_epochs', [30, 40, 50],
                  'epochs to decay learning rate')
flags.DEFINE_string('mode', 'crl', 'type of model to load')
flags.DEFINE_bool('policy', False, 'whether to use model or policy')


class PlacesLinear(nn.Module):
    """Linear classification head over fixed 2048-d features."""

    def __init__(self, classes):
        super(PlacesLinear, self).__init__()
        self.fc = nn.Linear(2048, classes)

    def forward(self, inp):
        logits = self.fc(inp)
        return logits
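# Hedged usage sketch (not part of the original file): PlacesLinear acts as
# a linear probe over fixed 2048-d features, the width of a ResNet-50 pool5
# vector; the backbone and the 365-way class count below are illustrative
# assumptions, not taken from this file.
if __name__ == '__main__':
    probe = PlacesLinear(classes=365)
    feats = torch.randn(4, 2048)  # dummy batch of pooled features
    logits = probe(feats)         # -> torch.Size([4, 365])
    print(logits.shape)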
flags.DEFINE_string('real_data_shape', default='asus', help='')
flags.DEFINE_string('checkpoint', default='checkpoint',
                    help='checkpoint directory')
flags.DEFINE_string(
    'manual_checkpoint', default=None,
    help='instead of tacking on extra terms, use their exact path')
flags.DEFINE_string('logpath', default='./logs', help='log directory')
flags.DEFINE_bool('serve', default=False,
                  help='export the model to allow for tensorflow serving')
flags.DEFINE_integer('num_files', default=None, help='')
flags.DEFINE_integer('shuffle_files', default=0, help='')
flags.DEFINE_integer('num_epochs', default=None, help='')
flags.DEFINE_list('filenames', default=None, help='')
flags.DEFINE_bool('notify', default=False, help='notify on end')
flags.DEFINE_bool('more_notify', default=False, help='notify on epoch')
flags.DEFINE_bool('plot_preds', default=True, help='plot pred plots')

# Output-noise options
flags.DEFINE_bool('random_noise', default=True,
                  help='add random noise to the output')
flags.DEFINE_float('maxval', default=0.1, help='upper bound for output noise')
flags.DEFINE_float('minval', default=0.0, help='lower bound for output noise')
flags.DEFINE_float('noise_std', default=0.02, help='std of the output noise')

# Architecture
flags.DEFINE_string('arch', default='vgg', help='')
flags.DEFINE_string('output', default='binned', help='')
flags.DEFINE_integer('coarse_bin', default=64, help='')
# flags.DEFINE_string('loss/output', default='vgg', help='')
flags.DEFINE_bool('coord_all', default=False, help='always use coord convs')
flags.DEFINE_bool('batch_norm', default=False, help='')
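# Hedged sketch (not from the original training code); one plausible reading
# of the noise flags above: Gaussian noise with stddev noise_std, clipped to
# [minval, maxval] and added to the model output. Assumes TF1-style
# `import tensorflow as tf` and an already-parsed FLAGS in this module.
def add_output_noise(outputs):
    if not FLAGS.random_noise:
        return outputs
    noise = tf.random_normal(tf.shape(outputs), stddev=FLAGS.noise_std)
    return outputs + tf.clip_by_value(noise, FLAGS.minval, FLAGS.maxval)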
        batch_size=FLAGS.batch_size,
        learning_rate=FLAGS.learning_rate,
        clean_train=FLAGS.clean_train,
        backprop_through_attack=FLAGS.backprop_through_attack,
        nb_filters=FLAGS.nb_filters)


if __name__ == '__main__':
    flags.DEFINE_float('label_smooth', 0.1,
                       ('Amount to subtract from correct label '
                        'and distribute among other labels'))
    flags.DEFINE_list('attack_type', ['fgsm', 'pgd'],
                      ("Attack type: 'fgsm'->'fast gradient sign method', "
                       "'pgd'->'projected gradient descent', "
                       "'bim'->'basic iterative method', "
                       "'cwl2'->'Carlini & Wagner L2', "
                       "'jsma'->'jsma method'"))
    flags.DEFINE_string('dataset', 'mnist',
                        ("dataset: 'mnist'->'mnist dataset', "
                         "'fmnist'->'fashion mnist dataset', "
                         "'cifar10'->'cifar-10 dataset'"))
    flags.DEFINE_string(
        'attack_model', 'mnist_model',
        ("defence model: 'basic_model'->'a cnn model for mnist', "
         "'all_cnn'->'a cnn model for cifar10', "
         "'cifar10_model'->'model for cifar10', "
         "'mnist_model'->'model for mnist'"))
    flags.DEFINE_integer('nb_filters', NB_FILTERS, 'Model size multiplier')
    flags.DEFINE_integer('nb_epochs', NB_EPOCHS,
                         'Number of epochs to train model')
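    # Hedged completion (an assumption, not shown in the original excerpt):
    # scripts that register flags under __main__ like this typically finish
    # by handing control to tf.app.run(), which parses the flags and then
    # calls main(argv). Assumes `import tensorflow as tf` at the top of the
    # file.
    tf.app.run()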
import os

import tensorflow as tf
from tensorflow.python.platform import flags

from a02_textcnn.model import Model
from utils.preprocess import build_corpus

FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 256, 'batch size for training')
flags.DEFINE_integer('embed_size', 100, 'dims of word embedding')
flags.DEFINE_integer('class_num', 2, 'number of classes')
flags.DEFINE_list('filters', [2, 3, 4], 'filter (window) sizes')
flags.DEFINE_integer('filter_num', 10, 'number of filters per size')
flags.DEFINE_integer('channel_size', 1, 'number of input channels')
flags.DEFINE_float('keep_prob', 0.9, 'dropout keep probability')
flags.DEFINE_float('learning_rate', 0.9, 'learning rate')
flags.DEFINE_integer('decay_step', 100, 'decay learning rate every decay_step')
flags.DEFINE_float('decay_rate', 0.9, 'decay learning rate with decay_rate')
flags.DEFINE_integer('epoch_num', 500, 'the number of epochs')
flags.DEFINE_integer('epoch_val', 50, 'validate every epoch_val epochs')
flags.DEFINE_string('check_point', 'checkpoint/', 'checkpoint path')


def main(_):
    train, test, _, sentence_size, vocab_size = build_corpus()
    train_x, train_y = train
    test_x, test_y = test
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(sentence_size, FLAGS.class_num, vocab_size,
## Dataset/method options
flags.DEFINE_integer(
    'num_classes', 2,
    'number of classes used in classification (e.g. 5-way classification).')
flags.DEFINE_string('training_data_path',
                    'input/feature_extraction_train_updated.csv',
                    'path to training data')
flags.DEFINE_string('testing_data_path',
                    'input/feature_extraction_test_updated.csv',
                    'path to testing data')
flags.DEFINE_string('target_variable', 'label',
                    'name of the target variable column')
flags.DEFINE_list('cols_drop', [
    'article_title', 'article_content', 'source', 'source_category', 'unit_id'
], 'list of columns to drop from the data, if any')
flags.DEFINE_string('special_encoding', 'latin-1',
                    'special encoding needed to read the data, if any')
flags.DEFINE_string('scaling', 'z-score',
                    'scaling applied to the dataset, if any')
flags.DEFINE_integer('pretrain_iterations', 0,
                     'number of pre-training iterations.')
flags.DEFINE_integer('metatrain_iterations', 1000,
                     'number of meta-training iterations.')
flags.DEFINE_integer('meta_batch_size', 32,
                     'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.1, 'the base learning rate of the generator')
flags.DEFINE_integer(
import os

import tensorflow as tf
from tensorflow.python.platform import flags

from a04_dcnn.model import Model
from utils.preprocess import build_corpus

FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 256, 'batch size for training')
flags.DEFINE_integer('embed_size', 100, 'dims of word embedding')
flags.DEFINE_integer('class_num', 2, 'number of classes')
flags.DEFINE_list('filters', [2, 3, 4], 'filter (window) sizes')
flags.DEFINE_list('filter_num', [5, 10], 'number of filters per conv layer')
flags.DEFINE_integer('channel_size', 1, 'number of input channels')
flags.DEFINE_float('keep_prob', 0.5, 'dropout keep probability')
flags.DEFINE_float('learning_rate', 0.01, 'learning rate')
flags.DEFINE_integer('decay_step', 100, 'decay learning rate every decay_step')
flags.DEFINE_float('decay_rate', 0.9, 'decay learning rate with decay_rate')
flags.DEFINE_integer('epoch_num', 500, 'the number of epochs')
flags.DEFINE_integer('epoch_val', 50, 'validate every epoch_val epochs')
flags.DEFINE_integer('k1', 20, 'k for the first k-max pooling layer')
flags.DEFINE_integer('k_top', 4, 'k for the top k-max pooling layer')
flags.DEFINE_string('check_point', 'checkpoint/', 'checkpoint path')


def main(_):
    train, test, _, sentence_size, vocab_size = build_corpus()
    train_x, train_y = train
    test_x, test_y = test
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
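# Hedged sketch (not part of the original file): the k1/k_top flags above
# configure DCNN-style k-max pooling. A minimal TF1 version, simplified in
# that tf.nn.top_k orders the kept values by magnitude, whereas the original
# DCNN preserves their sequence order:
def k_max_pool(x, k):
    # x: [batch, seq_len, channels]; top_k works on the last axis,
    # so move seq_len there and back.
    x_t = tf.transpose(x, [0, 2, 1])        # [batch, channels, seq_len]
    values, _ = tf.nn.top_k(x_t, k=k)       # [batch, channels, k]
    return tf.transpose(values, [0, 2, 1])  # [batch, k, channels]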
import datetime

from tensorflow.python.platform import flags

import global_macros


def getFlag(model_name):
    use_time = False
    if use_time:
        exp_name = datetime.datetime.now().strftime("%I:%M%p-%Y-%B-%d")
    else:
        exp_name = 'test'

    FLAGS = flags.FLAGS

    # Dataset options:
    flags.DEFINE_integer('batch_size', 8, 'Size of a batch')

    # Base model class, mandatory:
    flags.DEFINE_bool('train', True, 'whether to train or test')
    flags.DEFINE_bool('verbose', True,
                      'whether to show print information or not')
    flags.DEFINE_integer('epoch', 30, 'Number of epochs to train on')
    flags.DEFINE_string('exp', exp_name, 'name of experiment')
    flags.DEFINE_integer('log_interval', 1, 'log outputs every so many epochs')
    flags.DEFINE_integer('val_interval', 3, 'validate every so many epochs')
    flags.DEFINE_integer(
        'patience', 3,
        'number of non-improving validation iterations before early stop')
    flags.DEFINE_integer('save_interval', 10,
                         'save outputs every so many iterations')

    ## Saver/load options:
    flags.DEFINE_integer('max_to_keep', 10, 'maximum number of models to keep')
    flags.DEFINE_integer('keep_checkpoint_every_n_hours', 3,
                         'checkpoint intervals')
    flags.DEFINE_integer(
        'resume_iter', -1,
        'iteration to resume training from; -1 means not resuming')
    flags.DEFINE_string('ckptdir', global_macros.CKPT_ROOT + "/" + model_name,
                        'location where models will be stored')
    flags.DEFINE_string('logdir', global_macros.LOGGER_ROOT + "/" + model_name,
                        'location where logs of experiments will be stored')

    ## Plot options:
    flags.DEFINE_bool('plot', True, 'plot after training')
    flags.DEFINE_bool('crop', False, 'crop regions')
    flags.DEFINE_bool('crop_stack', True, 'crop stack / random crop')

    # Learning rate and loss:
    flags.DEFINE_bool('L1_loss', False, 'use L1 rather than L2 loss')
    flags.DEFINE_bool('weight_decay', False, 'turn on weight decay or not')
    flags.DEFINE_float('lr', 1e-4, 'learning rate for training')
    flags.DEFINE_float('lr_decay_val', 10, 'learning rate decay ratio')
    flags.DEFINE_bool('recompute', False, 'use recomputation')

    # Model specific:
    flags.DEFINE_bool('temp_only', False,
                      'only use temperature channel or not')
    flags.DEFINE_bool('ssim', False, 'use ssim loss or not')

    # Unet specific:
    flags.DEFINE_bool('is_pad', True, 'use padding for convolution or not')
    flags.DEFINE_integer('nfilters', 8, 'the number of base filters for unet')
    flags.DEFINE_integer('unet_levels', 3, 'levels of unet')
    flags.DEFINE_bool('img_emb', False, 'use image embedding or not')

    # LCN specific:
    flags.DEFINE_list('lcn_kernel', [1, 3, 3], 'kernel list for lcn model')
    flags.DEFINE_bool('regularize', False, 'turn on regularizer for LCN')
    flags.DEFINE_float('alpha', 1e5, 'regularizer value')

    # Tile-conv LCN:
    flags.DEFINE_bool('use_LCN', False,
                      'use LCN as the last layer, tile conv LCN only')

    return FLAGS
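# Hedged usage sketch (not in the original): register the flags for a model,
# parse argv explicitly (absl-style), then read values. 'lcn_baseline' is a
# made-up model name, not one defined by this repo.
if __name__ == '__main__':
    import sys
    FLAGS = getFlag('lcn_baseline')
    FLAGS(sys.argv)  # parse command-line overrides
    print('lr=%g, epochs=%d, ckptdir=%s'
          % (FLAGS.lr, FLAGS.epoch, FLAGS.ckptdir))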
# Training Options
flags.DEFINE_integer('iterations', 500000,
                     'The number of training iterations.')
flags.DEFINE_integer(
    'batch_size', 64,
    'The number of tasks sampled per batch (aka batch size).')
flags.DEFINE_float('lr', 0.0001, 'The learning rate.')
flags.DEFINE_integer('support', 5,
                     'The number of support examples per task (aka k-shot).')
flags.DEFINE_integer('query', 5, 'The number of query examples per task.')
flags.DEFINE_integer('embedding', 20, 'The embedding size.')

# Model Options
flags.DEFINE_string('activation', 'relu', 'One of relu, elu, or leaky_relu.')
flags.DEFINE_bool('max_pool', False, 'Use max pool rather than strides.')
flags.DEFINE_list('filters', [32, 64],
                  'List of filters per convolution layer.')
flags.DEFINE_list('kernels', [3, 3],
                  'List of kernel sizes per convolution layer.')
flags.DEFINE_list(
    'strides', [2, 2],
    'List of strides per convolution layer. Can be None if using max pooling.')
flags.DEFINE_list('fc_layers', [64, 64],
                  'List of fully connected nodes per layer.')
flags.DEFINE_float('drop_rate', 0.0, 'Dropout probability. 0 for no dropout.')
flags.DEFINE_string('norm', None, 'One of layer, batch, or None.')

# Loss Options
flags.DEFINE_float('lambda_embedding', 1.0, 'Lambda for the embedding loss.')
flags.DEFINE_float('lambda_support', 1.0,
                   'Lambda for the support control loss.')
flags.DEFINE_float('lambda_query', 1.0, 'Lambda for the query control loss.')
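# Note (not from the original file): command-line overrides of DEFINE_list
# flags parse as lists of strings (--filters=32,64 yields ['32', '64']), so
# numeric list flags like filters/kernels/strides need a cast before they
# reach layer constructors. A small helper assuming nothing beyond the flags
# above:
def _int_list(values):
    # Defaults may already be ints; casting is a no-op for those.
    return [int(v) for v in values] if values is not None else None

# Example use inside model construction:
#   filters = _int_list(FLAGS.filters)
#   strides = _int_list(FLAGS.strides)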