Example #1
import flags
from models import create_network, with_end_points
from utils import (build_result_str, save_images, save_network,
                   setup_train_experiment)

# experiment parameters
flags.DEFINE_integer("seed", 1, "experiment seed")
flags.DEFINE_string("name", None, "name of the experiment")
flags.DEFINE_string("data_dir", "data", "path to data")
flags.DEFINE_string("train_dir", "runs", "path to working dir")

# gan model parameters
flags.DEFINE_string("model", "mlp", "model name (mlp or mlp_with_bn)")
flags.DEFINE_string("layer_dims", "1000-1000-1000-10",
                    "dimensions of fully-connected layers")
flags.DEFINE_bool("use_dropout", False, "whenever to use dropout or not")
flags.DEFINE_float("lmbd", 1.0, "regularization coefficient")
flags.DEFINE_float("epsilon", 0.2, "epsilon for generative fgsm perturbation")

# adversary parameters
flags.DEFINE_integer("deepfool_iter", 25,
                     "maximum number of deepfool iterations")
flags.DEFINE_float("deepfool_clip", 0.5, "perturbation clip during search")
flags.DEFINE_float("deepfool_overshoot", 0.02,
                   "overshoot to improve speed of convergence")

# data parameters
flags.DEFINE_integer("batch_size", 100, "batch size")
flags.DEFINE_integer("test_batch_size", 100, "test batch size")
flags.DEFINE_integer("train_size", 50000, "training size")

# training parameters
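
The layer_dims flag above packs the fully-connected layer sizes into a dash-separated string. A minimal sketch of how such a value could be turned into integers (parse_layer_dims is a hypothetical helper, not part of the example):

def parse_layer_dims(layer_dims):
    # Split a "1000-1000-1000-10"-style string into integer layer sizes.
    return [int(dim) for dim in layer_dims.split("-")]

assert parse_layer_dims("1000-1000-1000-10") == [1000, 1000, 1000, 10]
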
Example #2
    'counts.')
flags.DEFINE_enum('preprocess_mode', 'PREPROCESS_SKIP_ALL_ZERO_COUNTS', [
    data.PREPROCESS_SKIP_ALL_ZERO_COUNTS,
    data.PREPROCESS_INJECT_RANDOM_SEQUENCES,
    data.PREPROCESS_ALL_COUNTS
], 'How to preprocess input data for training purposes.')
flags.DEFINE_list('input_features', [
    'SEQUENCE_ONE_HOT'
], 'List of features to use as inputs to the model. Valid choices: %r' %
                  _VALID_INPUT_FEATURES)
flags.DEFINE_integer(
    'kmer_k_max', 4,
    'Maximum k-mer size for which to calculate counts if using '
    'SEQUENCE_KMER_COUNT as a feature.')
flags.DEFINE_float(
    'ratio_random_dna', 1.0,
    'Ratio of random sequences to inject if using preprocess_mode == '
    'PREPROCESS_INJECT_RANDOM_SEQUENCES. Used to scale the default epoch_size.')
flags.DEFINE_integer(
    'total_reads_defining_positive', 0,
    'Number of reads required to be seen across all conditions to classify '
    'an example as positive.')
# TODO(mdimon): add a discounted cumulative gain when they are implemented
flags.DEFINE_list('metrics_measures',
                  ['auc/true_top_1p', 'spearman_correlation/score_top_1p'],
                  'Metric measurements to report to Vizier')
flags.DEFINE_string('hpconfig', '',
                    """A comma separated list of hyperparameters for the model.
    Format is hp1=value1,hp2=value2,etc. If this FLAG is set and
    there is a tuner, the tuner will train the model
    with the specified hyperparameters, filling in
    missing hyperparameters from the default_values in
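
The hpconfig help text above describes a comma-separated hp1=value1,hp2=value2 format. A minimal sketch of parsing such a string under that assumption (parse_hpconfig is a hypothetical helper, not taken from the example):

def parse_hpconfig(hpconfig):
    # "" -> {}; "lr=0.1,depth=3" -> {"lr": "0.1", "depth": "3"}
    if not hpconfig:
        return {}
    return dict(item.split("=", 1) for item in hpconfig.split(","))

assert parse_hpconfig("") == {}
assert parse_hpconfig("lr=0.1,depth=3") == {"lr": "0.1", "depth": "3"}
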
Example #3
                   setup_train_experiment)

# experiment parameters
flags.DEFINE_integer("seed", 1, "experiment seed")
flags.DEFINE_string("name", None, "name of the experiment")
flags.DEFINE_string("data_dir", "data", "path to data")
flags.DEFINE_string("train_dir", "runs", "path to working dir")

# gan model parameters
flags.DEFINE_string("model", "mlp", "model name (mlp or mlp_with_bn)")
flags.DEFINE_string("layer_dims", "1000-1000-1000-10", "dimensions of fully-connected layers")
flags.DEFINE_bool("use_dropout", False, "whenever to use dropout or not")

# adversary parameters
flags.DEFINE_integer("deepfool_iter", 25, "maximum number of deepfool iterations")
flags.DEFINE_float("deepfool_clip", 0.5, "perturbation clip during search")
flags.DEFINE_float("deepfool_overshoot", 0.02, "overshoot to improve speed of convergence")

# data parameters
flags.DEFINE_integer("batch_size", 100, "batch size")
flags.DEFINE_integer("test_batch_size", 100, "test batch size")
flags.DEFINE_integer("train_size", 50000, "training size")

# training parameters
flags.DEFINE_integer("num_epochs", 100, "number of epochs to run")
flags.DEFINE_float("initial_learning_rate", 0.001, "initial learning rate")
flags.DEFINE_float("learning_rate_decay_factor", 0.95, "learning rate decay factor")
flags.DEFINE_float("start_learning_rate_decay", 0, "learning rate decay factor")

# logging parameters
flags.DEFINE_integer("checkpoint_frequency", 10, "checkpoint frequency (in epochs)")
Example #4
                    "Path to the second fastq file for paired-end "
                    "sequencing, or None for single end")
flags.DEFINE_integer("measurement_id", None,
                     "The measurement data set ID for this fastq pair, from "
                     "the experiment proto")
flags.DEFINE_integer("sequence_length", 40,
                     "Expected length of each sequence read")
flags.DEFINE_string("output_name",
                    "xxx"
                    "aptitude", "Path and name for the output sstable")
flags.DEFINE_integer("base_qual_threshold", 20, "integer indicating the "
                     "lowest quality (on scale from 0 to 40) for a single "
                     "base to be considered acceptable")
flags.DEFINE_integer("bad_base_threshold", 5, "integer indicating the maximum "
                     "number of bad bases before a read is bad quality")
flags.DEFINE_float("avg_qual_threshold", 30.0, "float indicating the mean "
                   "quality across the whole read to be considered good")
flags.DEFINE_integer("num_reads", 99999999999, "The number of reads to include "
                     "from each fastq file.")


def main(unused_argv):

  # read in the fastq file(s)
  make_zeros = lambda: [0]
  count_table = collections.defaultdict(make_zeros)

  # The readahead speeds up this process about 10x
  # While it is technically possible to pre-pend '/gzip' to automagically
  # un-gzip files, this doesn't play nice with the readahead prepend.
  # See b/63985459 for more information
  # As a result, we do not pre-pend '/gzip' anymore and instead do this
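
The quality flags above (base_qual_threshold, bad_base_threshold, avg_qual_threshold) describe a per-read filter. A minimal sketch of how those three thresholds might combine; the helper below is an assumption, not the snippet's actual implementation:

def read_passes_quality(base_quals, base_qual_threshold=20,
                        bad_base_threshold=5, avg_qual_threshold=30.0):
    # A base is "bad" when its quality falls below base_qual_threshold;
    # a read fails with too many bad bases or a low mean quality.
    bad_bases = sum(1 for q in base_quals if q < base_qual_threshold)
    mean_qual = sum(base_quals) / float(len(base_quals))
    return bad_bases <= bad_base_threshold and mean_qual >= avg_qual_threshold

assert read_passes_quality([35] * 40)
assert not read_passes_quality([10] * 40)
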
Example #5
from lasagne.objectives import categorical_accuracy

import flags
from at import fast_gradient_perturbation
from data import batch_iterator, mnist_load, select_balanced_subset
from deepfool import deepfool
from models import create_network, with_end_points
from utils import (load_network, load_training_params, build_result_str,
                   save_images)

flags.DEFINE_string("load_dir", None, "path to load checkpoint from")
flags.DEFINE_integer("load_epoch", None, "epoch for which restore model")
flags.DEFINE_string("working_dir", "test", "path to working dir")
flags.DEFINE_bool("sort_labels", True, "sort labels")
flags.DEFINE_integer("batch_size", 100, "batch_index size (default: 100)")
flags.DEFINE_float("fgsm_epsilon", 0.2, "fast gradient epsilon (default: 0.2)")
flags.DEFINE_integer("deepfool_iter", 50,
                     "maximum number of deepfool iterations (default: 25)")
flags.DEFINE_float("deepfool_clip", 0.5,
                   "perturbation clip during search (default: 0.1)")
flags.DEFINE_float("deepfool_overshoot", 0.02,
                   "multiplier for final perturbation")
flags.DEFINE_integer("summary_frequency", 10, "summarize frequency")

FLAGS = flags.FLAGS
logger = logging.getLogger()


def setup_experiment():
    if not os.path.exists(FLAGS.load_dir) or not os.path.isdir(FLAGS.load_dir):
        raise ValueError("Could not find folder %s" % FLAGS.load_dir)
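
fgsm_epsilon above parameterizes the fast gradient sign method used by the imported fast_gradient_perturbation. A minimal NumPy sketch of the underlying idea, not the repository's Lasagne/Theano implementation:

import numpy as np

def fgsm_perturb(x, grad_wrt_x, epsilon=0.2, clip_min=0.0, clip_max=1.0):
    # Step epsilon along the sign of the loss gradient, then clip back into
    # the valid input range.
    return np.clip(x + epsilon * np.sign(grad_wrt_x), clip_min, clip_max)

print(fgsm_perturb(np.array([0.5, 0.9]), np.array([1.0, -2.0])))  # [0.7 0.7]
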
Example #6
flags.DEFINE_boolean('use_feature', True, 'Use feature or not')
flags.DEFINE_boolean('use_embedding', True, 'Use embedding or not')
flags.DEFINE_integer('feat_dim', -1, 'Dimension of input features')
flags.DEFINE_list(
    'node_dim', [256],
    'Dimension of hidden layers between feature and node embedding')
flags.DEFINE_list(
    'instance_h_dim', [256],
    'Dimension of hidden layers between node embedding and instance '
    'embedding; the last element is the dimension of the instance embedding')
flags.DEFINE_list(
    'graph_h_dim', [128],
    'Dimension of hidden layers between instance embedding and subgraph '
    'embedding; the last element is the dimension of the subgraph embedding')
flags.DEFINE_float('keep_prob', 0.6, 'Used for dropout')

flags.DEFINE_list('kernel_sizes', [1], 'List of kernel sizes (number of nodes per kernel)')
flags.DEFINE_string('pooling', 'max', '[max, average, sum]')

flags.DEFINE_integer('epoch', 4, 'Number of training epochs')
flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate')
flags.DEFINE_float('lambda_2', 1e-2, 'Coefficient of l2 regularization loss')
flags.DEFINE_float('memory_fraction', 0.5, 'Fraction of GPU memory to use')

FLAGS = flags.FLAGS

if __name__ == '__main__':
    predictor = Predictor(FLAGS)
    train_accuracy, test_accuracy = predictor.fit()
    print('Training Accuracy: %f' % train_accuracy)
    print('Test Accuracy: %f' % test_accuracy)
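
Note that DEFINE_list values set on the command line typically arrive as strings, so dimensions such as node_dim, instance_h_dim, and graph_h_dim usually need an explicit int conversion before model construction. A small sketch under that assumption (to_int_list is a hypothetical helper, not part of the example):

def to_int_list(values):
    # Handles both the in-code default [256] and a command-line value that
    # the flags library delivers as a list of strings, e.g. ['256', '128'].
    return [int(v) for v in values]

assert to_int_list([256]) == [256]
assert to_int_list(['256', '128']) == [256, 128]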