Code example #1
 def __init__(self, subparsers, command_dict):
     super(MediaLogStoreRetrieveCommand, self).__init__(subparsers, command_dict)
     self._parser.add_argument('camera_name', default='pano', const='pano', nargs='?',
                               choices=['pano', 'c0', 'c1', 'c2', 'c3', 'c4', 'ptz', 'ir'])
     self._parser.add_argument('--dst', default=None, help='Filename of saved image')
     add_bool_arg(self._parser, 'save-as-rgb24', default=False)
     add_bool_arg(self._parser, 'stitching', default=True)
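
All of the examples on this page call an add_bool_arg helper whose definition is not shown. As a rough guide to what these calls expect, here is a minimal sketch of such a helper; the signature (parser, name, optional help text, default, and an optional no_name for the negative flag) is inferred from the calls in the examples, and the actual spot-sdk and graph-gan implementations may differ.

import argparse


def add_bool_arg(parser, name, help='', default=False, no_name=None):
    # Hypothetical helper, not the projects' actual code: registers a paired
    # --<name> / --no-<name> flag (or --<no_name> if given), both writing to a
    # single destination derived from <name>.
    dest = name.replace('-', '_')
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--' + name, dest=dest, action='store_true', help=help)
    negative = no_name if no_name is not None else 'no-' + name
    group.add_argument('--' + negative, dest=dest, action='store_false')
    parser.set_defaults(**{dest: default})

Under this sketch, add_bool_arg(self._parser, 'stitching', default=True) creates --stitching and --no-stitching with args.stitching defaulting to True, and the no_name="test" form used in the graph-gan examples creates a --train/--test pair stored in args.train.
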
Code example #2
 def __init__(self, subparsers, command_dict):
     super(MediaLogEnableDebugCommand, self).__init__(subparsers, command_dict)
     add_bool_arg(self._parser, 'temperature')
     add_bool_arg(self._parser, 'humidity')
     add_bool_arg(self._parser, 'BIT')
     add_bool_arg(self._parser, 'shock', default=True)
     add_bool_arg(self._parser, 'system-stats')
Code example #3
File: compositor.py Project: yan99uic/spot-sdk
 def __init__(self, subparsers, command_dict):
     super(CompositorSetIrMeterOverlayCommand, self).__init__(subparsers, command_dict)
     self._parser.add_argument('color', default='jet', const='jet', nargs='?', choices=['jet', 'greyscale', 'grayscale'])
     self._parser.add_argument('-x', default=0.5,
                               help='horizontal coordinate of reticle')
     self._parser.add_argument('-y', default=0.5,
                               help='vertical coordinate of reticle')
     add_bool_arg(self._parser, 'enable', default=True)
Code example #4
File: compositor.py Project: yan99uic/spot-sdk
 def __init__(self, subparsers, command_dict):
     super(CompositorSetIrColorMapCommand, self).__init__(subparsers, command_dict)
     self._parser.add_argument('color', default='jet', const='jet', nargs='?', choices=['jet', 'greyscale', 'grayscale'])
     self._parser.add_argument('--min-temp', default=0.0, type=float,
                               help='minimum temperature on the temperature scale')
     self._parser.add_argument('--max-temp', default=100.0, type=float,
                               help='maximum temperature on the temperature scale')
     add_bool_arg(self._parser, 'auto_scale', default=True)
Code example #5
File: media_log.py Project: utexas-bwi/spot-sdk
 def __init__(self, subparsers, command_dict):
     super(MediaLogRetrieveCommand, self).__init__(subparsers, command_dict)
     self._parser.add_argument('name', help='"name" of a logpoint')
     self._parser.add_argument('--dst',
                               default=None,
                               help='Filename of saved image')
     add_bool_arg(self._parser, 'save-as-rgb24', default=False)
     add_bool_arg(self._parser, 'stitching', default=True)
Code example #6
File: power.py Project: xwixcn/spot-sdk
 def __init__(self, subparsers, command_dict):
     super(PowerSetPowerStatusCommand,
           self).__init__(subparsers, command_dict)
     add_bool_arg(self._parser, 'ptz', default=True)
     add_bool_arg(self._parser, 'aux1')
     add_bool_arg(self._parser, 'aux2')
     add_bool_arg(self._parser, 'external_mic')
Code example #7
def parse_args():
    import argparse

    dir_path = dirname(realpath(__file__))

    parser = argparse.ArgumentParser()

    parser.add_argument("--num-samples", type=int, default=100000, help="number of samples to use for training")
    parser.add_argument("--G", type=str, default="", help="model #")

    parser.add_argument("--log-file", type=str, default="", help='log file name - default is name of file in outs/ ; "stdout" prints to console')
    parser.add_argument("--log", type=str, default="INFO", help="log level", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])

    parser.add_argument("--model", type=str, default="ParticleNet", help="classifier to train", choices=['ParticleNet', 'JEDINet'])

    parser.add_argument("--dir-path", type=str, default=dir_path, help="path where dataset and output will be stored")
    utils.add_bool_arg(parser, "n", "run on nautilus cluster", default=False)

    utils.add_bool_arg(parser, "load-model", "load a pretrained model", default=False)
    parser.add_argument("--start-epoch", type=int, default=0, help="which epoch to start training on (only makes sense if loading a model)")

    parser.add_argument("--num_hits", type=int, default=30, help="num nodes in graph")
    utils.add_bool_arg(parser, "mask", "use masking", default=False)

    parser.add_argument("--num-epochs", type=int, default=1000, help="number of epochs to train")

    parser.add_argument("--batch-size", type=int, default=384, help="batch size")
    parser.add_argument("--optimizer", type=str, default="adam", help="pick optimizer", choices=['adam', 'rmsprop', 'adamw'])
    parser.add_argument('--lr', type=float, default=3e-4)

    parser.add_argument("--name", type=str, default="test", help="name or tag for model; will be appended with other info")
    args = parser.parse_args()

    if(args.n):
        args.dir_path = "/graphganvol/mnist_graph_gan/jets/"

    args.node_feat_size = 4 if args.mask else 3

    return args
Code example #8
 def __init__(self, subparsers, command_dict):
     super(MediaLogRetrieveAllCommand, self).__init__(subparsers, command_dict)
     add_bool_arg(self._parser, 'save-as-rgb24', default=False)
     add_bool_arg(self._parser, 'stitching', default=False)
Code example #9
File: main.py Project: jmduarte/graph-gan
def parse_args():
    dir_path = dirname(realpath(__file__))

    parser = argparse.ArgumentParser()

    # meta

    parser.add_argument(
        "--name",
        type=str,
        default="test",
        help="name or tag for model; will be appended with other info")
    utils.add_bool_arg(parser,
                       "train",
                       "use training or testing dataset for model",
                       default=True,
                       no_name="test")
    parser.add_argument("--ttsplit",
                        type=float,
                        default=0.85,
                        help="ratio of train/test split")

    utils.add_bool_arg(parser,
                       "load-model",
                       "load a pretrained model",
                       default=True)
    utils.add_bool_arg(
        parser,
        "override-args",
        "override original model args when loading with new args",
        default=False)
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=-1,
        help=
        "which epoch to start training on, only applies if loading a model, by default start at the highest epoch model"
    )
    parser.add_argument("--num-epochs",
                        type=int,
                        default=2000,
                        help="number of epochs to train")

    parser.add_argument("--dir-path",
                        type=str,
                        default=dir_path,
                        help="path where dataset and output will be stored")

    parser.add_argument("--num-samples",
                        type=int,
                        default=10000,
                        help="num samples to evaluate every 5 epochs")

    utils.add_bool_arg(parser, "n", "run on nautilus cluster", default=False)
    utils.add_bool_arg(parser,
                       "bottleneck",
                       "use torch.utils.bottleneck settings",
                       default=False)
    utils.add_bool_arg(parser, "lx", "run on lxplus", default=False)

    utils.add_bool_arg(parser,
                       "save-zero",
                       "save the initial figure",
                       default=False)
    parser.add_argument("--save-epochs",
                        type=int,
                        default=5,
                        help="save outputs per how many epochs")

    utils.add_bool_arg(parser, "debug", "debug mode", default=False)

    parser.add_argument("--jets",
                        type=str,
                        default="g",
                        help="jet type - options are g or t")

    # architecture

    parser.add_argument("--num-hits",
                        type=int,
                        default=30,
                        help="number of hits")
    parser.add_argument("--coords",
                        type=str,
                        default="polarrel",
                        help="cartesian, polarrel or polarrelabspt")

    parser.add_argument("--norm",
                        type=float,
                        default=1,
                        help="normalizing max value of features to this value")

    parser.add_argument("--sd",
                        type=float,
                        default=0.2,
                        help="standard deviation of noise")

    parser.add_argument("--node-feat-size",
                        type=int,
                        default=3,
                        help="node feature size")
    parser.add_argument(
        "--hidden-node-size",
        type=int,
        default=32,
        help="hidden vector size of each node (incl node feature size)")
    parser.add_argument(
        "--latent-node-size",
        type=int,
        default=0,
        help=
        "latent vector size of each node - 0 means same as hidden node size")

    parser.add_argument(
        "--clabels",
        type=int,
        default=0,
        help=
        "0 - no clabels, 1 - clabels with pt only, 2 - clabels with pt and detach"
    )
    utils.add_bool_arg(parser,
                       "clabels-fl",
                       "use conditional labels in first layer",
                       default=True)
    utils.add_bool_arg(parser,
                       "clabels-hl",
                       "use conditional labels in hidden layers",
                       default=True)

    parser.add_argument("--fn",
                        type=int,
                        nargs='*',
                        default=[256, 256],
                        help="hidden fn layers e.g. 256 256")
    parser.add_argument(
        "--fe1g",
        type=int,
        nargs='*',
        default=0,
        help=
        "hidden and output gen fe layers e.g. 64 128 in the first iteration - 0 means same as fe"
    )
    parser.add_argument(
        "--fe1d",
        type=int,
        nargs='*',
        default=0,
        help=
        "hidden and output disc fe layers e.g. 64 128 in the first iteration - 0 means same as fe"
    )
    parser.add_argument("--fe",
                        type=int,
                        nargs='+',
                        default=[96, 160, 192],
                        help="hidden and output fe layers e.g. 64 128")
    parser.add_argument("--fnd",
                        type=int,
                        nargs='*',
                        default=[256, 128],
                        help="hidden disc output layers e.g. 256 128")
    parser.add_argument(
        "--mp-iters-gen",
        type=int,
        default=0,
        help="number of message passing iterations in the generator")
    parser.add_argument(
        "--mp-iters-disc",
        type=int,
        default=0,
        help=
        "number of message passing iterations in the discriminator (if applicable)"
    )
    parser.add_argument(
        "--mp-iters",
        type=int,
        default=2,
        help=
        "number of message passing iterations in gen and disc both - will be overwritten by gen or disc specific args if given"
    )
    utils.add_bool_arg(parser,
                       "sum",
                       "mean or sum in models",
                       default=True,
                       no_name="mean")

    utils.add_bool_arg(parser, "int-diffs", "use int diffs", default=False)
    utils.add_bool_arg(parser, "pos-diffs", "use pos diffs", default=True)
    # utils.add_bool_arg(parser, "scalar-diffs", "use scalar diff (as opposed to vector)", default=True)
    utils.add_bool_arg(parser,
                       "deltar",
                       "use delta r as an edge feature",
                       default=True)
    utils.add_bool_arg(parser,
                       "deltacoords",
                       "use delta coords as edge features",
                       default=False)

    parser.add_argument("--leaky-relu-alpha",
                        type=float,
                        default=0.2,
                        help="leaky relu alpha")

    utils.add_bool_arg(parser,
                       "dea",
                       "use early averaging discriminator",
                       default=False)
    utils.add_bool_arg(parser,
                       "fcg",
                       "use a fully connected graph",
                       default=True)

    parser.add_argument("--glorot",
                        type=float,
                        default=0,
                        help="gain of glorot - if zero then glorot not used")

    utils.add_bool_arg(parser, "gtanh", "use tanh for g output", default=True)
    utils.add_bool_arg(parser,
                       "dearlysigmoid",
                       "use early sigmoid in d",
                       default=False)

    utils.add_bool_arg(parser,
                       "mask",
                       "use masking for zero-padded particles",
                       default=False)
    utils.add_bool_arg(parser,
                       "mask-weights",
                       "weight D nodes by mask",
                       default=False)

    # optimization

    parser.add_argument(
        "--optimizer",
        type=str,
        default="rmsprop",
        help="optimizer - options are adam, rmsprop, adadelta or acgd")
    parser.add_argument("--loss",
                        type=str,
                        default="ls",
                        help="loss to use - options are og, ls, w, hinge")

    parser.add_argument("--lr-disc",
                        type=float,
                        default=3e-5,
                        help="learning rate discriminator")
    parser.add_argument("--lr-gen",
                        type=float,
                        default=1e-5,
                        help="learning rate generator")
    parser.add_argument("--beta1",
                        type=float,
                        default=0.9,
                        help="Adam optimizer beta1")
    parser.add_argument("--beta2",
                        type=float,
                        default=0.999,
                        help="Adam optimizer beta2")
    parser.add_argument("--batch-size", type=int, default=0, help="batch size")

    parser.add_argument(
        "--num-critic",
        type=int,
        default=1,
        help="number of critic updates for each generator update")
    parser.add_argument(
        "--num-gen",
        type=int,
        default=1,
        help=
        "number of generator updates for each critic update (num-critic must be 1 for this to apply)"
    )

    # regularization

    utils.add_bool_arg(parser,
                       "batch-norm-disc",
                       "use batch normalization",
                       default=False)
    utils.add_bool_arg(parser,
                       "batch-norm-gen",
                       "use batch normalization",
                       default=False)
    utils.add_bool_arg(parser,
                       "spectral-norm-disc",
                       "use spectral normalization in discriminator",
                       default=False)
    utils.add_bool_arg(parser,
                       "spectral-norm-gen",
                       "use spectral normalization in generator",
                       default=False)

    parser.add_argument("--disc-dropout",
                        type=float,
                        default=0.5,
                        help="fraction of discriminator dropout")
    parser.add_argument("--gen-dropout",
                        type=float,
                        default=0,
                        help="fraction of generator dropout")

    utils.add_bool_arg(parser,
                       "label-smoothing",
                       "use label smoothing with discriminator",
                       default=False)
    parser.add_argument("--label-noise",
                        type=float,
                        default=0,
                        help="discriminator label noise (between 0 and 1)")

    parser.add_argument(
        "--gp",
        type=float,
        default=0,
        help="WGAN generator penalty weight - 0 means not used")

    # augmentation

    # remember to add any new args to the if statement below
    utils.add_bool_arg(parser,
                       "aug-t",
                       "augment with translations",
                       default=False)
    utils.add_bool_arg(parser, "aug-f", "augment with flips", default=False)
    utils.add_bool_arg(parser,
                       "aug-r90",
                       "augment with 90 deg rotations",
                       default=False)
    utils.add_bool_arg(parser, "aug-s", "augment with scalings", default=False)
    parser.add_argument("--translate-ratio",
                        type=float,
                        default=0.125,
                        help="random translate ratio")
    parser.add_argument("--scale-sd",
                        type=float,
                        default=0.125,
                        help="random scale lognormal standard deviation")
    parser.add_argument("--translate-pn-ratio",
                        type=float,
                        default=0.05,
                        help="random translate per node ratio")

    utils.add_bool_arg(parser,
                       "adaptive-prob",
                       "adaptive augment probability",
                       default=False)
    parser.add_argument("--aug-prob",
                        type=float,
                        default=1.0,
                        help="probability of being augmented")

    # evaluation

    utils.add_bool_arg(parser, "fid", "calc fid", default=False)
    parser.add_argument("--fid-eval-size",
                        type=int,
                        default=8192,
                        help="number of samples generated for evaluating fid")
    parser.add_argument("--fid-batch-size",
                        type=int,
                        default=32,
                        help="batch size when generating samples for fid eval")
    parser.add_argument("--gpu-batch", type=int, default=50, help="")

    utils.add_bool_arg(parser, "w1", "calc w1", default=True)
    parser.add_argument("--w1-num-samples",
                        type=int,
                        nargs='+',
                        default=[100, 1000, 10000],
                        help='array of # of jet samples to test')

    parser.add_argument("--jet-features",
                        type=str,
                        nargs='*',
                        default=['mass', 'pt'],
                        help='jet level features to evaluate')

    args = parser.parse_args()

    if (args.aug_t or args.aug_f or args.aug_r90 or args.aug_s):
        args.augment = True
    else:
        args.augment = False

    if not (args.coords == 'cartesian' or args.coords == 'polarrel'
            or args.coords == 'polarrelabspt'):
        print("invalid coordinate system - exiting")
        sys.exit()

    if not (args.jets == 'g' or args.jets == 't'):
        print("invalid jet type - exiting")
        sys.exit()

    if not args.coords == 'polarrelabspt':
        print("Can't have jet level features for this coordinate system")
        args.jf = False
    elif len(args.jet_features):
        args.jf = True

    if (not (args.loss == 'w' or args.loss == 'og' or args.loss == 'ls'
             or args.loss == 'hinge')):
        print("invalid loss - exiting")
        sys.exit()

    if (args.int_diffs):
        print("int_diffs not supported yet - exiting")
        sys.exit()

    if (args.augment):
        print("augmentation not implemented yet - exiting")
        sys.exit()

    if (args.optimizer == 'acgd'
            and (args.num_critic != 1 or args.num_gen != 1)):
        print("acgd can't have num critic or num gen > 1 - exiting")
        sys.exit()

    if (args.n and args.lx):
        print("can't be on nautilus and lxplus both - exiting")
        sys.exit()

    if (args.latent_node_size and args.latent_node_size < 3):
        print("latent node size can't be less than 2 - exiting")
        sys.exit()

    if (args.clabels > 2):
        print("clabels can't be greater than 2 - exiting")
        sys.exit()

    if (args.n):
        args.dir_path = "/graphganvol/mnist_graph_gan/jets"
        args.save_zero = True

    if (args.bottleneck):
        args.save_zero = False

    if (args.lx):
        args.dir_path = "/eos/user/r/rkansal/mnist_graph_gan/jets"
        args.save_zero = True

    if (args.batch_size == 0):
        if args.num_hits == 30:
            args.batch_size = 128
        elif args.num_hits == 100:
            args.batch_size = 32

    if not args.mp_iters_gen: args.mp_iters_gen = args.mp_iters
    if not args.mp_iters_disc: args.mp_iters_disc = args.mp_iters

    args.clabels_first_layer = args.clabels if args.clabels_fl else 0
    args.clabels_hidden_layers = args.clabels if args.clabels_hl else 0

    if args.mask:
        args.node_feat_size += 1
    else:
        args.mask_weights = False

    return args
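
For reference, a quick check of how the flag pairs registered above behave on the command line, assuming the add_bool_arg sketch given after code example #1 (illustrative only, not part of the original project):

import argparse

parser = argparse.ArgumentParser()
add_bool_arg(parser, 'load-model', 'load a pretrained model', default=True)
add_bool_arg(parser, 'train', 'use training or testing dataset', default=True, no_name='test')

print(parser.parse_args([]))                             # Namespace(load_model=True, train=True)
print(parser.parse_args(['--no-load-model', '--test']))  # Namespace(load_model=False, train=False)
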
Code example #10
File: main.py Project: sznajder/mnist_graph_gan
def parse_args():
    dir_path = dirname(realpath(__file__))

    parser = argparse.ArgumentParser()

    # utils.add_bool_arg(parser, "gru", "use GRUs", default=False)
    # parser.add_argument("--fe-hidden-size", type=int, default=128, help="edge network hidden layer size")
    # parser.add_argument("--fe-out-size", type=int, default=256, help="edge network out size")
    #
    # parser.add_argument("--fn-hidden-size", type=int, default=256, help="message passing hidden layers sizes")
    # parser.add_argument("--fn-num-layers", type=int, default=2, help="message passing number of layers in generator")

    # meta

    parser.add_argument(
        "--name",
        type=str,
        default="test",
        help="name or tag for model; will be appended with other info")
    utils.add_bool_arg(parser,
                       "train",
                       "use training or testing dataset for model",
                       default=True,
                       no_name="test")
    parser.add_argument("--num",
                        type=int,
                        nargs='+',
                        default=[3],
                        help="number to train on")

    utils.add_bool_arg(parser,
                       "load-model",
                       "load a pretrained model",
                       default=True)
    utils.add_bool_arg(
        parser,
        "override-args",
        "override original model args when loading with new args",
        default=False)
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=-1,
        help=
        "which epoch to start training on, only applies if loading a model, by default start at the highest epoch model"
    )
    parser.add_argument("--num-epochs",
                        type=int,
                        default=2000,
                        help="number of epochs to train")

    parser.add_argument("--dir-path",
                        type=str,
                        default=dir_path,
                        help="path where dataset and output will be stored")

    parser.add_argument("--num_samples",
                        type=int,
                        default=100,
                        help="num samples to save every 5 epochs")

    utils.add_bool_arg(parser,
                       "sparse-mnist",
                       "use sparse mnist dataset (as opposed to superpixels)",
                       default=False)

    utils.add_bool_arg(parser, "n", "run on nautilus cluster", default=False)
    utils.add_bool_arg(parser,
                       "bottleneck",
                       "use torch.utils.bottleneck settings",
                       default=False)
    utils.add_bool_arg(parser, "lx", "run on lxplus", default=False)

    utils.add_bool_arg(parser,
                       "save-zero",
                       "save the initial figure",
                       default=False)

    utils.add_bool_arg(parser, "debug", "debug mode", default=False)

    # architecture

    parser.add_argument("--num-hits",
                        type=int,
                        default=75,
                        help="number of hits")
    parser.add_argument("--sd",
                        type=float,
                        default=0.2,
                        help="standard deviation of noise")

    parser.add_argument("--node-feat-size",
                        type=int,
                        default=3,
                        help="node feature size")
    parser.add_argument(
        "--hidden-node-size",
        type=int,
        default=32,
        help="hidden vector size of each node (incl node feature size)")
    parser.add_argument(
        "--latent-node-size",
        type=int,
        default=0,
        help=
        "latent vector size of each node - 0 means same as hidden node size")

    parser.add_argument("--fn",
                        type=int,
                        nargs='*',
                        default=[256, 256],
                        help="hidden fn layers e.g. 256 256")
    parser.add_argument(
        "--fe1g",
        type=int,
        nargs='*',
        default=0,
        help=
        "hidden and output gen fe layers e.g. 64 128 in the first iteration - 0 means same as fe"
    )
    parser.add_argument(
        "--fe1d",
        type=int,
        nargs='*',
        default=0,
        help=
        "hidden and output disc fe layers e.g. 64 128 in the first iteration - 0 means same as fe"
    )
    parser.add_argument("--fe",
                        type=int,
                        nargs='+',
                        default=[64, 128],
                        help="hidden and output fe layers e.g. 64 128")
    parser.add_argument("--fnd",
                        type=int,
                        nargs='*',
                        default=[256, 128],
                        help="hidden disc output layers e.g. 256 128")
    parser.add_argument(
        "--mp-iters-gen",
        type=int,
        default=0,
        help="number of message passing iterations in the generator")
    parser.add_argument(
        "--mp-iters-disc",
        type=int,
        default=0,
        help=
        "number of message passing iterations in the discriminator (if applicable)"
    )
    parser.add_argument(
        "--mp-iters",
        type=int,
        default=2,
        help=
        "number of message passing iterations in gen and disc both - will be overwritten by gen or disc specific args if given"
    )
    parser.add_argument("--kernel-size",
                        type=int,
                        default=25,
                        help="graph convolutional layer kernel size")
    utils.add_bool_arg(parser,
                       "sum",
                       "mean or sum in models",
                       default=True,
                       no_name="mean")

    utils.add_bool_arg(parser, "int-diffs", "use int diffs", default=False)
    utils.add_bool_arg(parser, "pos-diffs", "use pos diffs", default=True)

    parser.add_argument("--leaky-relu-alpha",
                        type=float,
                        default=0.2,
                        help="leaky relu alpha")

    utils.add_bool_arg(parser, "gcnn", "use gcnn", default=False)
    parser.add_argument("--cutoff",
                        type=float,
                        default=0.32178,
                        help="cutoff edge distance"
                        )  # found empirically to match closest to Superpixels

    utils.add_bool_arg(parser,
                       "dea",
                       "use early averaging discriminator",
                       default=False)
    utils.add_bool_arg(parser,
                       "fcg",
                       "use a fully connected graph",
                       default=True)

    parser.add_argument("--glorot",
                        type=float,
                        default=0,
                        help="gain of glorot - if zero then glorot not used")

    # optimization

    parser.add_argument(
        "--optimizer",
        type=str,
        default="adam",
        help="optimizer - options are adam, rmsprop, adadelta or acgd")
    parser.add_argument("--loss",
                        type=str,
                        default="ls",
                        help="loss to use - options are og, ls, w, hinge")

    parser.add_argument("--lr-disc",
                        type=float,
                        default=1e-4,
                        help="learning rate discriminator")
    parser.add_argument("--lr-gen",
                        type=float,
                        default=1e-4,
                        help="learning rate generator")
    parser.add_argument("--beta1",
                        type=float,
                        default=0.9,
                        help="Adam optimizer beta1")
    parser.add_argument("--beta2",
                        type=float,
                        default=0.999,
                        help="Adam optimizer beta2")
    parser.add_argument("--batch-size",
                        type=int,
                        default=10,
                        help="batch size")

    parser.add_argument(
        "--num-critic",
        type=int,
        default=1,
        help="number of critic updates for each generator update")
    parser.add_argument(
        "--num-gen",
        type=int,
        default=1,
        help=
        "number of generator updates for each critic update (num-critic must be 1 for this to apply)"
    )

    # regularization

    utils.add_bool_arg(parser,
                       "batch-norm-disc",
                       "use batch normalization",
                       default=False)
    utils.add_bool_arg(parser,
                       "batch-norm-gen",
                       "use batch normalization",
                       default=False)
    utils.add_bool_arg(parser,
                       "spectral-norm-disc",
                       "use spectral normalization in discriminator",
                       default=False)
    utils.add_bool_arg(parser,
                       "spectral-norm-gen",
                       "use spectral normalization in generator",
                       default=False)

    parser.add_argument("--disc-dropout",
                        type=float,
                        default=0.5,
                        help="fraction of discriminator dropout")
    parser.add_argument("--gen-dropout",
                        type=float,
                        default=0,
                        help="fraction of generator dropout")

    utils.add_bool_arg(parser,
                       "label-smoothing",
                       "use label smoothing with discriminator",
                       default=False)
    parser.add_argument("--label-noise",
                        type=float,
                        default=0,
                        help="discriminator label noise (between 0 and 1)")

    # utils.add_bool_arg(parser, "gp", "use gradient penalty", default=False)
    parser.add_argument(
        "--gp",
        type=float,
        default=0,
        help="WGAN generator penalty weight - 0 means not used")

    utils.add_bool_arg(parser, "gom", "use gen only mode", default=False)
    utils.add_bool_arg(parser, "bgm", "use boost g mode", default=False)
    utils.add_bool_arg(parser, "rd", "use restart d mode", default=False)
    parser.add_argument("--bag", type=float, default=0.1, help="bag")

    parser.add_argument("--unrolled-steps",
                        type=int,
                        default=0,
                        help="number of unrolled D steps for G training")

    # augmentation

    # remember to add any new args to the if statement below
    utils.add_bool_arg(parser,
                       "aug-t",
                       "augment with translations",
                       default=False)
    utils.add_bool_arg(parser, "aug-f", "augment with flips", default=False)
    utils.add_bool_arg(parser,
                       "aug-r90",
                       "augment with 90 deg rotations",
                       default=False)
    utils.add_bool_arg(parser, "aug-s", "augment with scalings", default=False)
    parser.add_argument("--translate-ratio",
                        type=float,
                        default=0.125,
                        help="random translate ratio")
    parser.add_argument("--scale-sd",
                        type=float,
                        default=0.125,
                        help="random scale lognormal standard deviation")
    parser.add_argument("--translate-pn-ratio",
                        type=float,
                        default=0.05,
                        help="random translate per node ratio")

    utils.add_bool_arg(parser,
                       "adaptive-prob",
                       "adaptive augment probability",
                       default=False)
    parser.add_argument("--aug-prob",
                        type=float,
                        default=1.0,
                        help="probability of being augmented")

    # evaluation

    utils.add_bool_arg(parser, "fid", "calc fid", default=True)
    parser.add_argument("--fid-eval-size",
                        type=int,
                        default=8192,
                        help="number of samples generated for evaluating fid")
    parser.add_argument("--fid-batch-size",
                        type=int,
                        default=32,
                        help="batch size when generating samples for fid eval")
    parser.add_argument("--gpu-batch", type=int, default=50, help="")

    args = parser.parse_args()

    if isinstance(args.num, list) and len(args.num) == 1:
        args.num = args.num[0]
    elif args.gcnn:
        print("multiple numbers and gcnn not support yet - exiting")
        sys.exit()
    elif isinstance(args.num, list):
        args.num = list(set(args.num))  # remove duplicates
        args.num.sort()
        print(args.num)

    if (args.aug_t or args.aug_f or args.aug_r90 or args.aug_s):
        args.augment = True
    else:
        args.augment = False

    if (not (args.loss == 'w' or args.loss == 'og' or args.loss == 'ls'
             or args.loss == 'hinge')):
        print("invalid loss - exiting")
        sys.exit()

    if (args.int_diffs and not args.pos_diffs):
        print(
            "int_diffs = true and pos_diffs = false not supported yet - exiting"
        )
        sys.exit()

    if (args.augment and args.gcnn):
        print("augmentation not implemented with GCNN yet - exiting")
        sys.exit()

    if (args.optimizer == 'acgd'
            and (args.num_critic != 1 or args.num_gen != 1)):
        print("acgd can't have num critic or num gen > 1 - exiting")
        sys.exit()

    if (args.n and args.lx):
        print("can't be on nautilus and lxplus both - exiting")
        sys.exit()

    if (args.num_samples != 100):
        print(
            "save outputs not coded for anything other than 100 samples yet - exiting"
        )
        sys.exit()

    if (args.n):
        args.dir_path = "/graphganvol/mnist_graph_gan/mnist_superpixels"
        args.save_zero = True

    if (args.bottleneck):
        args.save_zero = False

    if (args.lx):
        args.dir_path = "/eos/user/r/rkansal/mnist_graph_gan/mnist_superpixels"
        args.save_zero = True

    if not args.mp_iters_gen: args.mp_iters_gen = args.mp_iters
    if not args.mp_iters_disc: args.mp_iters_disc = args.mp_iters

    if (args.latent_node_size and args.latent_node_size < 2):
        print("latent node size can't be less than 2 - exiting")
        sys.exit()

    args.channels = [64, 32, 16, 1]

    return args
Code example #11

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parse parameters')
    # Argument for model name
    parser.add_argument(
        "model",
        help=
        "one of vae_normal_with_1_kl_0_mmd or pixelvae_categorical_with_1_kl_0_mmd or pixelcnn_4}"
    )
    parser.add_argument("--dataset",
                        help="one of MNIST, MovingMNIST}",
                        default="MovingMNIST")
    parser.add_argument("--folder_suffix", help="suffix to folder", default="")
    # Argument for systems requirement
    add_bool_arg(parser, 'gpu', default=True)
    parser.add_argument('--gpu_id',
                        help='GPU id, check with nvidia-smi',
                        type=str,
                        default="0")

    # Argument for more generic stuff regarding dataloader and epochs
    parser.add_argument('--plot_interval',
                        help='plot how many times an epoch',
                        type=int,
                        default=1)
    parser.add_argument('--epochs',
                        help='how many epochs',
                        type=int,
                        default=10)
    parser.add_argument('--train_batch_size',
Code example #12
def parse_args():
    parser = argparse.ArgumentParser()

    # meta

    parser.add_argument(
        "--name",
        type=str,
        default="test",
        help="name or tag for model; will be appended with other info")
    parser.add_argument("--dataset",
                        type=str,
                        default="jets",
                        help="dataset to use",
                        choices=['jets', 'jets-lagan'])

    utils.add_bool_arg(parser,
                       "train",
                       "use training or testing dataset for model",
                       default=True,
                       no_name="test")
    parser.add_argument("--ttsplit",
                        type=float,
                        default=0.7,
                        help="ratio of train/test split")

    parser.add_argument("--model",
                        type=str,
                        default="mpgan",
                        help="model to run",
                        choices=['mpgan', 'rgan', 'graphcnngan'])
    parser.add_argument(
        "--model-D",
        type=str,
        default="",
        help=
        "model discriminator, mpgan default is mpgan, rgan and graphcnngan default is rgan",
        choices=['mpgan', 'rgan', 'pointnet'])

    utils.add_bool_arg(parser,
                       "load-model",
                       "load a pretrained model",
                       default=True)
    utils.add_bool_arg(parser,
                       "override-load-check",
                       "override check for whether name has already been used",
                       default=False)
    utils.add_bool_arg(
        parser,
        "override-args",
        "override original model args when loading with new args",
        default=False)
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=-1,
        help=
        "which epoch to start training on, only applies if loading a model, by default start at the highest epoch model"
    )
    parser.add_argument("--num-epochs",
                        type=int,
                        default=2000,
                        help="number of epochs to train")

    parser.add_argument("--dir-path",
                        type=str,
                        default="",
                        help="path where dataset and output will be stored")

    parser.add_argument("--num-samples",
                        type=int,
                        default=50000,
                        help="num samples to evaluate every 5 epochs")

    utils.add_bool_arg(parser, "n", "run on nautilus cluster", default=False)
    utils.add_bool_arg(parser,
                       "bottleneck",
                       "use torch.utils.bottleneck settings",
                       default=False)
    utils.add_bool_arg(parser, "lx", "run on lxplus", default=False)

    utils.add_bool_arg(parser,
                       "save-zero",
                       "save the initial figure",
                       default=False)
    utils.add_bool_arg(parser,
                       "no-save-zero-or",
                       "override --n save-zero default",
                       default=False)
    parser.add_argument("--save-epochs",
                        type=int,
                        default=0,
                        help="save outputs per how many epochs")
    parser.add_argument("--save-model-epochs",
                        type=int,
                        default=0,
                        help="save models per how many epochs")

    utils.add_bool_arg(parser, "debug", "debug mode", default=False)
    utils.add_bool_arg(parser,
                       "break-zero",
                       "break after 1 iteration",
                       default=False)
    utils.add_bool_arg(parser,
                       "low-samples",
                       "small number of samples for debugging",
                       default=False)

    utils.add_bool_arg(parser,
                       "const-ylim",
                       "const ylim in plots",
                       default=False)

    parser.add_argument("--jets",
                        type=str,
                        default="g",
                        help="jet type",
                        choices=['g', 't', 'w', 'z', 'q', 'sig', 'bg'])

    utils.add_bool_arg(parser,
                       "real-only",
                       "use jets with ony real particles",
                       default=False)

    utils.add_bool_arg(parser,
                       "multi-gpu",
                       "use multiple gpus if possible",
                       default=False)

    parser.add_argument(
        "--log-file",
        type=str,
        default="",
        help=
        'log file name - default is name of file in outs/ ; "stdout" prints to console'
    )
    parser.add_argument(
        "--log",
        type=str,
        default="INFO",
        help="log level",
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])

    parser.add_argument("--seed", type=int, default=4, help="torch seed")

    # architecture

    parser.add_argument("--num-hits",
                        type=int,
                        default=30,
                        help="number of hits")
    parser.add_argument("--coords",
                        type=str,
                        default="polarrel",
                        help="cartesian, polarrel or polarrelabspt",
                        choices=['cartesian', 'polarrel', 'polarrelabspt'])

    parser.add_argument("--norm",
                        type=float,
                        default=1,
                        help="normalizing max value of features to this value")

    parser.add_argument("--sd",
                        type=float,
                        default=0.2,
                        help="standard deviation of noise")

    parser.add_argument("--node-feat-size",
                        type=int,
                        default=3,
                        help="node feature size")
    parser.add_argument(
        "--hidden-node-size",
        type=int,
        default=32,
        help="hidden vector size of each node (incl node feature size)")
    parser.add_argument(
        "--latent-node-size",
        type=int,
        default=0,
        help=
        "latent vector size of each node - 0 means same as hidden node size")

    parser.add_argument(
        "--clabels",
        type=int,
        default=0,
        help=
        "0 - no clabels, 1 - clabels with pt only, 2 - clabels with pt and eta",
        choices=[0, 1, 2])
    utils.add_bool_arg(parser,
                       "clabels-fl",
                       "use conditional labels in first layer",
                       default=True)
    utils.add_bool_arg(parser,
                       "clabels-hl",
                       "use conditional labels in hidden layers",
                       default=True)

    parser.add_argument("--fn",
                        type=int,
                        nargs='*',
                        default=[256, 256],
                        help="hidden fn layers e.g. 256 256")
    parser.add_argument(
        "--fe1g",
        type=int,
        nargs='*',
        default=0,
        help=
        "hidden and output gen fe layers e.g. 64 128 in the first iteration - 0 means same as fe"
    )
    parser.add_argument(
        "--fe1d",
        type=int,
        nargs='*',
        default=0,
        help=
        "hidden and output disc fe layers e.g. 64 128 in the first iteration - 0 means same as fe"
    )
    parser.add_argument("--fe",
                        type=int,
                        nargs='+',
                        default=[96, 160, 192],
                        help="hidden and output fe layers e.g. 64 128")
    parser.add_argument(
        "--fmg",
        type=int,
        nargs='*',
        default=[64],
        help="mask network layers e.g. 64; input 0 for no intermediate layers")
    parser.add_argument(
        "--mp-iters-gen",
        type=int,
        default=0,
        help="number of message passing iterations in the generator")
    parser.add_argument(
        "--mp-iters-disc",
        type=int,
        default=0,
        help=
        "number of message passing iterations in the discriminator (if applicable)"
    )
    parser.add_argument(
        "--mp-iters",
        type=int,
        default=2,
        help=
        "number of message passing iterations in gen and disc both - will be overwritten by gen or disc specific args if given"
    )
    utils.add_bool_arg(parser,
                       "sum",
                       "mean or sum in models",
                       default=True,
                       no_name="mean")

    utils.add_bool_arg(parser, "int-diffs", "use int diffs", default=False)
    utils.add_bool_arg(parser, "pos-diffs", "use pos diffs", default=False)
    utils.add_bool_arg(parser,
                       "all-ef",
                       "use all node features for edge distance",
                       default=False)
    # utils.add_bool_arg(parser, "scalar-diffs", "use scalar diff (as opposed to vector)", default=True)
    utils.add_bool_arg(parser,
                       "deltar",
                       "use delta r as an edge feature",
                       default=False)
    utils.add_bool_arg(parser,
                       "deltacoords",
                       "use delta coords as edge features",
                       default=False)

    parser.add_argument("--leaky-relu-alpha",
                        type=float,
                        default=0.2,
                        help="leaky relu alpha")

    utils.add_bool_arg(parser,
                       "dea",
                       "use early averaging discriminator",
                       default=True)
    parser.add_argument("--fnd",
                        type=int,
                        nargs='*',
                        default=[],
                        help="hidden disc output layers e.g. 128 64")

    utils.add_bool_arg(
        parser,
        "lfc",
        "use a fully connected network to go from noise vector to initial graph",
        default=False)
    parser.add_argument("--lfc-latent-size",
                        type=int,
                        default=128,
                        help="size of lfc latent vector")

    utils.add_bool_arg(parser,
                       "fully-connected",
                       "use a fully connected graph",
                       default=True)
    parser.add_argument(
        "--num-knn",
        type=int,
        default=10,
        help="# of nearest nodes to connect to (if not fully connected)")
    utils.add_bool_arg(
        parser,
        "self-loops",
        "use self loops in graph - always true for fully connected",
        default=True)

    parser.add_argument("--glorot",
                        type=float,
                        default=0,
                        help="gain of glorot - if zero then glorot not used")

    utils.add_bool_arg(parser, "gtanh", "use tanh for g output", default=True)
    # utils.add_bool_arg(parser, "dearlysigmoid", "use early sigmoid in d", default=False)

    utils.add_bool_arg(parser,
                       "mask-feat",
                       "add mask as fourth feature",
                       default=False)
    utils.add_bool_arg(parser,
                       "mask-feat-bin",
                       "binary fourth feature",
                       default=False)
    utils.add_bool_arg(parser,
                       "mask-weights",
                       "weight D nodes by mask",
                       default=False)
    utils.add_bool_arg(
        parser,
        "mask-manual",
        "manually mask generated nodes with pT less than cutoff",
        default=False)
    utils.add_bool_arg(
        parser,
        "mask-exp",
        "exponentially decaying or binary mask; relevant only if mask-manual is true",
        default=False)
    utils.add_bool_arg(parser,
                       "mask-real-only",
                       "only use masking for real jets",
                       default=False)
    utils.add_bool_arg(parser,
                       "mask-learn",
                       "learn mask from latent vars only use during gen",
                       default=False)
    utils.add_bool_arg(parser,
                       "mask-learn-bin",
                       "binary or continuous learnt mask",
                       default=True)
    utils.add_bool_arg(parser,
                       "mask-learn-sep",
                       "learn mask from separate noise vector",
                       default=False)
    utils.add_bool_arg(parser,
                       "mask-disc-sep",
                       "separate disc network for # particles",
                       default=False)
    utils.add_bool_arg(
        parser,
        "mask-fnd-np",
        "use num masked particles as an additional arg in D (dea will automatically be set true)",
        default=False)
    utils.add_bool_arg(parser, "mask-c", "conditional mask", default=True)
    utils.add_bool_arg(parser,
                       "mask-fne-np",
                       "pass num particles as features into fn and fe",
                       default=False)
    parser.add_argument("--mask-epoch",
                        type=int,
                        default=0,
                        help="# of epochs after which to start masking")

    utils.add_bool_arg(
        parser,
        "noise-padding",
        "use Gaussian noise instead of zero-padding for fake particles",
        default=False)

    # optimization

    parser.add_argument("--optimizer",
                        type=str,
                        default="rmsprop",
                        help="pick optimizer",
                        choices=['adam', 'rmsprop', 'adadelta', 'agcd'])
    parser.add_argument("--loss",
                        type=str,
                        default="ls",
                        help="loss to use - options are og, ls, w, hinge",
                        choices=['og', 'ls', 'w', 'hinge'])

    parser.add_argument("--lr-disc",
                        type=float,
                        default=3e-5,
                        help="learning rate discriminator")
    parser.add_argument("--lr-gen",
                        type=float,
                        default=1e-5,
                        help="learning rate generator")
    parser.add_argument("--beta1",
                        type=float,
                        default=0.9,
                        help="Adam optimizer beta1")
    parser.add_argument("--beta2",
                        type=float,
                        default=0.999,
                        help="Adam optimizer beta2")
    parser.add_argument("--batch-size", type=int, default=0, help="batch size")

    parser.add_argument(
        "--num-critic",
        type=int,
        default=1,
        help="number of critic updates for each generator update")
    parser.add_argument(
        "--num-gen",
        type=int,
        default=1,
        help=
        "number of generator updates for each critic update (num-critic must be 1 for this to apply)"
    )

    # regularization

    utils.add_bool_arg(parser,
                       "batch-norm-disc",
                       "use batch normalization",
                       default=False)
    utils.add_bool_arg(parser,
                       "batch-norm-gen",
                       "use batch normalization",
                       default=False)
    utils.add_bool_arg(parser,
                       "spectral-norm-disc",
                       "use spectral normalization in discriminator",
                       default=False)
    utils.add_bool_arg(parser,
                       "spectral-norm-gen",
                       "use spectral normalization in generator",
                       default=False)

    parser.add_argument("--disc-dropout",
                        type=float,
                        default=0.5,
                        help="fraction of discriminator dropout")
    parser.add_argument("--gen-dropout",
                        type=float,
                        default=0,
                        help="fraction of generator dropout")

    utils.add_bool_arg(parser,
                       "label-smoothing",
                       "use label smoothing with discriminator",
                       default=False)
    parser.add_argument("--label-noise",
                        type=float,
                        default=0,
                        help="discriminator label noise (between 0 and 1)")

    parser.add_argument(
        "--gp",
        type=float,
        default=0,
        help="WGAN generator penalty weight - 0 means not used")

    # augmentation

    # remember to add any new args to the if statement below
    utils.add_bool_arg(parser,
                       "aug-t",
                       "augment with translations",
                       default=False)
    utils.add_bool_arg(parser, "aug-f", "augment with flips", default=False)
    utils.add_bool_arg(parser,
                       "aug-r90",
                       "augment with 90 deg rotations",
                       default=False)
    utils.add_bool_arg(parser, "aug-s", "augment with scalings", default=False)
    parser.add_argument("--translate-ratio",
                        type=float,
                        default=0.125,
                        help="random translate ratio")
    parser.add_argument("--scale-sd",
                        type=float,
                        default=0.125,
                        help="random scale lognormal standard deviation")
    parser.add_argument("--translate-pn-ratio",
                        type=float,
                        default=0.05,
                        help="random translate per node ratio")

    utils.add_bool_arg(parser,
                       "adaptive-prob",
                       "adaptive augment probability",
                       default=False)
    parser.add_argument("--aug-prob",
                        type=float,
                        default=1.0,
                        help="probability of being augmented")

    # evaluation

    utils.add_bool_arg(parser, "fpnd", "calc fpnd", default=True)
    utils.add_bool_arg(
        parser,
        "fjpnd",
        "calc Frechet Joint ParticleNet Distance (for conditional GAN evaluation)",
        default=True)
    # parser.add_argument("--fid-eval-size", type=int, default=8192, help="number of samples generated for evaluating fid")
    parser.add_argument(
        "--fpnd-batch-size",
        type=int,
        default=256,
        help="batch size when generating samples for fpnd eval")
    parser.add_argument("--gpu-batch", type=int, default=50, help="")

    utils.add_bool_arg(
        parser,
        "eval",
        "calculate the evaluation metrics: W1, FNPD, coverage, mmd",
        default=True)
    parser.add_argument("--eval-tot-samples",
                        type=int,
                        default=50000,
                        help='tot # of jets to generate to sample from')

    parser.add_argument("--w1-num-samples",
                        type=int,
                        nargs='+',
                        default=[10000],
                        help='array of # of jet samples to test')

    parser.add_argument(
        "--cov-mmd-num-samples",
        type=int,
        default=100,
        help='size of samples to use for calculating coverage and MMD')
    parser.add_argument("--cov-mmd-num-batches",
                        type=int,
                        default=10,
                        help='# of batches to average coverage and MMD over')

    parser.add_argument("--jf",
                        type=str,
                        nargs='*',
                        default=['mass', 'pt'],
                        help='jet level features to evaluate')
    utils.add_bool_arg(
        parser,
        "efp",
        "calculate EFPs for evaluation (will cause memory spikes so off by default)",
        default=False)

    # ext models

    parser.add_argument("--latent-dim", type=int, default=128, help="")

    parser.add_argument("--rgang-fc",
                        type=int,
                        nargs='+',
                        default=[64, 128],
                        help='rGAN generator layer node sizes')
    parser.add_argument(
        "--rgand-sfc",
        type=int,
        nargs='*',
        default=0,
        help='rGAN discriminator convolutional layer node sizes')
    parser.add_argument("--rgand-fc",
                        type=int,
                        nargs='*',
                        default=0,
                        help='rGAN discriminator layer node sizes')

    parser.add_argument("--pointnetd-pointfc",
                        type=int,
                        nargs='*',
                        default=[64, 128, 1024],
                        help='pointnet discriminator point layer node sizes')
    parser.add_argument("--pointnetd-fc",
                        type=int,
                        nargs='*',
                        default=[512],
                        help='pointnet discriminator final layer node sizes')

    parser.add_argument("--graphcnng-layers",
                        type=int,
                        nargs='+',
                        default=[32, 24],
                        help='GraphCNN-GAN generator layer node sizes')
    utils.add_bool_arg(
        parser,
        "graphcnng-tanh",
        "use tanh activation for final graphcnn generator output",
        default=False)

    args = parser.parse_args()

    return args
Code example #13
File: streamquality.py Project: greck2908/spot-sdk
 def __init__(self, subparsers, command_dict):
     super(StreamQualityCongestionControlCommand,
           self).__init__(subparsers, command_dict)
     add_bool_arg(self._parser, 'congestion_control')
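
The spot-sdk style examples (#1-#6, #8, #13) each take (subparsers, command_dict) and add arguments to self._parser inside __init__. The base class that creates self._parser is not shown on this page; the snippet below is a hypothetical illustration of the pattern those constructors assume, not the actual spot-sdk implementation.

import argparse


class Command:
    # Hypothetical base class (illustration only): registers a subcommand
    # parser and records the command instance under its name.
    NAME = 'example'

    def __init__(self, subparsers, command_dict):
        self._parser = subparsers.add_parser(self.NAME)
        command_dict[self.NAME] = self


parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
command_dict = {}
Command(subparsers, command_dict)  # subclasses such as the examples above then add their flags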