Code Example #1
File: dist_hetr.py Project: rsumner31/ngraph
2. run `tensorboard --logdir /tmp/hetr_tb/ --port 6006`

Use SSH port forwarding to view TensorBoard running on a remote server:
https://stackoverflow.com/questions/37987839/how-can-i-run-tensorboard-on-a-remote-server
"""
from __future__ import print_function
from contextlib import closing
import ngraph as ng
import ngraph.transformers as ngt
from ngraph.frontends.neon import NgraphArgparser
import numpy as np

# Command Line Parser
parser = NgraphArgparser(description="Distributed HeTr Example")
parser.add_argument("--graph_vis",
                    action="store_true",
                    help="enable graph visualization")
args = parser.parse_args()

# Build the graph
H = ng.make_axis(length=6, name='height')
N = ng.make_axis(length=8, name='batch')
W1 = ng.make_axis(length=2, name='W1')
W2 = ng.make_axis(length=4, name='W2')
x = ng.placeholder(axes=[H, N])
w1 = ng.placeholder(axes=[W1, H])
w2 = ng.placeholder(axes=[W2, W1])
with ng.metadata(device_id=('0', '1'), parallel=N):
    dot1 = ng.dot(w1, x).named("dot1")
dot2 = ng.dot(w2, dot1).named("dot2")
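
The excerpt ends before the graph is executed. As a hedged sketch (not part of the original file), the ops above could be compiled and run as follows, assuming the backend selected through NgraphArgparser supports the multi-device metadata (e.g. hetr):

# Sketch: compile dot2 and evaluate it once on random inputs.
with closing(ngt.make_transformer()) as transformer:
    computation = transformer.computation(dot2, x, w1, w2)
    result = computation(np.random.rand(6, 8),   # x:  axes (H, N)
                         np.random.rand(2, 6),   # w1: axes (W1, H)
                         np.random.rand(4, 2))   # w2: axes (W2, W1)
    print(result.shape)  # expected (4, 8), i.e. axes (W2, N)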
Code Example #2
File: interactive.py Project: cdj0311/nlp-architect
from ngraph.frontends.neon import GaussianInit, Adam
from ngraph.frontends.neon import make_bound_computation
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt
from nlp_architect.data.babi_dialog import BABI_Dialog
from nlp_architect.models.memn2n_dialogue import MemN2N_Dialog
from utils import interactive_loop
from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists, validate

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--task',
    type=int,
    default=1,
    choices=range(1, 7),
    help='the task ID to train/test on from the bAbI-dialog dataset (1-6)')
parser.add_argument(
    '--emb_size',
    type=int,
    default=32,
    help='Size of the word-embedding used in the model. (default 32)')
parser.add_argument(
    '--nhops',
    type=int,
    default=3,
    help='Number of memory hops in the network',
    choices=range(1, 10))
parser.add_argument(
    '--use_match_type',
Code Example #3
            benchmark.time(num_iterations, n_skip, dataset + '_msra_bprop',
                           visualize, 'device_id'))
    else:
        fprop_computation_op = ng.computation(model_out, 'all')
        benchmark = Benchmark(fprop_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_fprop',
                           visualize))


if __name__ == "__main__":
    parser = NgraphArgparser(description='Train deep residual network')
    parser.add_argument('-data',
                        '--data_set',
                        default='cifar10',
                        choices=['cifar10', 'i1k'],
                        help="data set name")
    parser.add_argument('-s',
                        '--skip_iter',
                        type=int,
                        default=1,
                        help="number of iterations to skip")
    parser.add_argument('-m',
                        '--num_devices',
                        nargs='+',
                        type=int,
                        default=[1],
                        help="number of devices to run the benchmark on")
    parser.add_argument('--hetr_device',
                        default='cpu',
Code Example #4
        benchmark_fprop = Benchmark(fprop_computation_op, train_set, inputs,
                                    args.backend, args.hetr_device)
        Benchmark.print_benchmark_results(
            benchmark_fprop.time(args.num_iterations,
                                 args.skip_iter,
                                 'ds2_fprop',
                                 args.visualize,
                                 preprocess=True))


if __name__ == "__main__":
    parser = NgraphArgparser(description='Train mini deep speech 2')
    parser.add_argument(
        '--nfilters',
        type=int,
        help='Number of convolutional filters in the first layer',
        default=2)
    parser.add_argument('--filter_width',
                        type=int,
                        help='Width of 1D convolutional filters',
                        default=11)
    parser.add_argument('--str_w', type=int, help='Stride in time', default=1)
    parser.add_argument('--depth',
                        type=int,
                        help='Number of RNN layers',
                        default=1)
    parser.add_argument(
        '--hidden_size',
        type=int,
        help='Number of hidden units in the RNN and affine layers',
Code Example #5
File: mnist_dcgan.py Project: ugiwgh/ngraph
                                    size=self.shape)
        else:
            raise StopIteration

    def next(self):
        return self.__next__()

    def __iter__(self):
        return self


# parse command line arguments
parser = NgraphArgparser()
parser.add_argument(
    '--plot_interval',
    type=int,
    default=200,
    help='save generated images with a period of this many iterations')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
np.random.seed(args.rng_seed)

args.batch_size = 32

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size)

# noise source
noise_dim = (2, 1, 3, 3)
noise_generator = Noise(train_set.ndata,
Code Example #6
from __future__ import division
from __future__ import print_function
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import UniformInit, RMSProp, ax, Tanh, Logistic
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation
from ngraph.frontends.neon import LSTM
import ngraph.transformers as ngt
from ngraph.frontends.neon.data.tsp import TSP
from tsp_seqarrayiter import TSPSequentialArrayIterator
from utils import save_plot

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--train_file', default='tsp5.txt',
                    choices=['tsp5.txt', 'tsp10.txt'],
                    help='specify training filename')
parser.add_argument('--test_file', default='tsp5_test.txt',
                    choices=['tsp5_test.txt', 'tsp10_test.txt'],
                    help='specify testing filename')
parser.add_argument('--lr', type=float, default=0.0025, help='learning rate')
parser.add_argument('--hs', type=int, default=256, help='hidden unit size')
# Note: argparse's type=bool treats any non-empty string (even "False") as
# True, so the flag is parsed explicitly here.
parser.add_argument('--emb', type=lambda s: s.lower() in ('true', '1'),
                    default=True, help='use embedding (pass "false" to disable)')
parser.set_defaults()
args = parser.parse_args()
args.batch_size = 128
args.num_iterations = 20000

gradient_clip_value = 2
num_features = 2  # for planar TSP, each city's location is represented by a 2-d coordinate
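
For context, a hedged sketch (not shown in this excerpt) of how gradient_clip_value is typically passed to the optimizer in these neon-frontend examples; the RMSProp keyword names are assumptions based on sibling examples:

# Sketch: RMSProp with gradient clipping; keyword names are assumed.
optimizer = RMSProp(learning_rate=args.lr,
                    gradient_clip_value=gradient_clip_value)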
Code Example #7
    gen_txt = [index_to_token[i] for i in gen_txt]
    return gen_txt


def expand_onehot(x):
    """
    Simply converts an integer to a one-hot vector of the same size as out_axis
    """
    return ng.one_hot(x, axis=out_axis)

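As a hedged aside (not in the original file): ng.one_hot prepends the given axis to the input's axes, so an integer tensor with axes (N,) becomes a one-hot tensor with axes (out_axis, N). A self-contained illustration with made-up axis lengths:

# Sketch: out_axis is normally the output/vocabulary axis defined elsewhere
# in this script; it is redefined here only to make the example standalone.
out_axis = ng.make_axis(length=4, name='vocab')
N = ng.make_axis(length=3, name='batch')
idx = ng.placeholder(axes=[N])
onehot = expand_onehot(idx)  # result axes: (vocab, batch)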

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--use_embedding',
    default=False,
    dest='use_embedding',
    action='store_true',
    help='If given, embedding layer is used as the first layer')
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--recurrent_units',
                    type=int,
                    help="Number of recurrent units in the network",
                    default=256)
parser.set_defaults(num_iterations=20000)
args = parser.parse_args()

use_embedding = args.use_embedding
recurrent_units = args.recurrent_units
Code Example #8
usage: python toy_wgan.py -b gpu -t 100000
"""
from contextlib import closing
import ngraph.transformers as ngt
from ngraph.frontends.neon import Adam, Affine, Rectlin, Sequential
from ngraph.frontends.neon import ConstantInit, KaimingInit
from ngraph.frontends.neon import make_bound_computation, NgraphArgparser
from ngraph.frontends.neon.logging import ProgressBar
import ngraph as ng
import os
import numpy as np
from toy_utils import DataGenerator, NormalNoise, generate_plot


parser = NgraphArgparser()
parser.add_argument('--plot_interval', type=int, default=200,
                    help='Plot results every this many iterations')
parser.add_argument('--loss_type', type=str, default='WGAN-GP',
                    help='Choose loss type', choices=['WGAN-GP', 'WGAN'])
parser.add_argument('--gp_scale', type=int, default=1,
                    help='Scale of the gradient penalty')
parser.add_argument('--w_clip', type=float, default=0.01,
                    help='Weight clipping value for WGAN')
parser.add_argument('--data_type', type=str, default='Roll',
                    help='Choose ground truth distribution',
                    choices=['Rectangular', 'Circular', 'Roll'])
parser.add_argument('--dim', type=int, default=512,
                    help='Hidden layer dimension for the model')
parser.add_argument('--num_critic', type=int, default=5,
                    help='Number of discriminator iterations per generator iteration')
parser.add_argument('--plot_dir', type=str, default='WGAN_Toy_Plots',
                    help='Directory name to save the results')
Code Example #9
File: mnist_mlp.py Project: rsumner31/ngraph
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Layer, Affine, Preprocess, Sequential
from ngraph.frontends.neon import GaussianInit, Rectlin, Logistic, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import loop_eval
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator

from ngraph.frontends.neon import MNIST
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple mlp on mnist dataset')
parser.add_argument('--save_file',
                    type=str,
                    default=None,
                    help="File to save weights")
parser.add_argument('--load_file',
                    type=str,
                    default=None,
                    help="File to load weights")
parser.add_argument('--inference',
                    action="store_true",
                    help="Run inference with the loaded weights")
args = parser.parse_args()

if args.inference and (args.load_file is None):
    print("--load_file must be set to run inference")
    quit()

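The actual save/restore calls fall below this excerpt. The following is only a loosely hedged sketch of how the Saver imported above is wired in similar ngraph examples; the method names are assumptions, not confirmed by this file:

# Sketch: persist weights after training, restore them before inference.
weight_saver = Saver()
# after training:   weight_saver.save(filename=args.save_file)
# before inference: weight_saver.restore(transformer=transformer,
#                                        computation=eval_computation,
#                                        filename=args.load_file)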
if args.save_file is not None:
Code Example #10
from ngraph.frontends.neon import make_bound_computation
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt

from nlp_architect.models.kvmemn2n import KVMemN2N
from nlp_architect.data.wikimovies import WIKIMOVIES
from nlp_architect.utils.io import validate_parent_exists, check_size
from examples.kvmemn2n.interactive_util import interactive_loop

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--emb_size',
    type=int,
    default=50,
    help='Size of the word-embedding used in the model. (default 50)',
    action=check_size(1, 20000))
parser.add_argument('--nhops',
                    type=int,
                    default=3,
                    help='Number of memory hops in the network',
                    action=check_size(1, 20))
parser.add_argument('--lr',
                    type=float,
                    default=0.01,
                    help='learning rate',
                    action=check_size(0, 5))
parser.add_argument('--subset',
                    type=str,
                    default='wiki-entities',
Code Example #11
from ngraph.frontends.neon import GaussianInit, Adam
from ngraph.frontends.neon import make_bound_computation
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt
from nlp_architect.data.babi_dialog import BABI_Dialog
from nlp_architect.models.memn2n_dialogue import MemN2N_Dialog
from utils import interactive_loop
from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists, validate

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--task',
    type=int,
    default=1,
    choices=range(1, 7),
    help='the task ID to train/test on from the bAbI-dialog dataset (1-6)')
parser.add_argument(
    '--emb_size',
    type=int,
    default=32,
    help='Size of the word-embedding used in the model. (default 32)')
parser.add_argument('--nhops',
                    type=int,
                    default=3,
                    help='Number of memory hops in the network',
                    choices=range(1, 10))
parser.add_argument('--use_match_type',
                    default=False,
                    action='store_true',
Code Example #12
File: train_resnet.py Project: rsumner31/ngraph
    base_lr = 0.1
    gamma = 0.1
    momentum_coef = 0.9
    wdecay = 0.0001
    nesterov = False

    print("HyperParameters")
    print("Learning Rate:     " + str(base_lr))
    print("Momentum:          " + str(momentum_coef))
    print("Weight Decay:      " + str(wdecay))
    print("Nesterov:          " + str(nesterov))

    # Command Line Parser
    parser = NgraphArgparser(description="Resnet for Imagenet and Cifar10")
    parser.add_argument('--dataset',
                        type=str,
                        default="cifar10",
                        help="Enter cifar10 or i1k")
    parser.add_argument('--size',
                        type=int,
                        default=56,
                        help="Enter size of resnet")
    parser.add_argument('--tb',
                        action="store_true",
                        help="enable tensorboard logging")
    parser.add_argument('--logfile',
                        type=str,
                        default=None,
                        help="Name of the csv file which logs different "
                             "metrics of the model")
    parser.add_argument('--hetr_device',
                        type=str,
Code Example #13
File: video_c3d.py Project: rsumner31/ngraph
    valid_set = make_validation_loader(manifest[0], manifest_root, batch_size,
                                       subset_pct)

    return train_set, valid_set


if __name__ == "__main__":

    # Load training configuration and parse arguments
    train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'train.cfg')
    config_files = [train_config] if os.path.exists(train_config) else []
    parser = NgraphArgparser(__doc__, default_config_files=config_files)

    parser.add_argument('--subset_pct',
                        type=float,
                        default=100,
                        help='subset of training dataset to use (percentage)')
    parser.add_argument('--log_file',
                        type=str,
                        default='training_log.pkl',
                        help='name for the training log file')
    args = parser.parse_args()

    np.random.seed(args.rng_seed)

    # Load data
    train_set, valid_set = get_data(args.manifest, args.manifest_root,
                                    args.batch_size, args.subset_pct,
                                    args.rng_seed)

    # Define model and train
Code Example #14
File: train.py Project: cdj0311/nlp-architect
    cal_f1_score)
import math
from nlp_architect.contrib.ngraph.weight_initilizers import (make_placeholder, make_weights)
from nlp_architect.utils.io import sanitize_path
from nlp_architect.utils.io import validate, validate_existing_directory, \
    validate_existing_filepath, validate_parent_exists, check_size


"""
Training script for reading comprehension model

"""
# parse the command line arguments
parser = NgraphArgparser(__doc__)

parser.add_argument('--data_path', help='enter path for training data',
                    type=str)

parser.add_argument('--gpu_id', default="0", help='enter gpu id',
                    type=str, action=check_size(0, 10))

parser.add_argument('--max_para_req', default=100,
                    help='enter the max length of paragraph',
                    type=int, action=check_size(30, 300))

parser.add_argument('--batch_size_squad', default=16,
                    help='enter the batch size',
                    type=int, action=check_size(1, 256))

parser.set_defaults()

args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
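
check_size, imported above from nlp_architect.utils.io, attaches range validation to an argument at parse time. The library's real implementation may differ; the following is a hypothetical sketch of such an argparse action factory:

import argparse

def check_size(min_val, max_val):
    """Return an argparse action that rejects values outside [min_val, max_val]."""
    class CheckSize(argparse.Action):
        def __call__(self, parser, namespace, value, option_string=None):
            if not min_val <= value <= max_val:
                parser.error('{} must be between {} and {}'.format(
                    option_string, min_val, max_val))
            setattr(namespace, self.dest, value)
    return CheckSize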
Code Example #15
File: char_conv.py Project: rsumner31/ngraph
               bias_init=ConstantInit(0.),
               activation=Rectlin()))
    layers.append(Dropout(keep=0.5))
    layers.append(
        Affine(axes=(ax.Y, ),
               weight_init=init,
               bias_init=ConstantInit(0.),
               activation=Softmax()))

    return layers


# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--sentence_length',
                    type=int,
                    default=1014,
                    help='the number of characters in a sentence')
parser.add_argument(
    '--use_uppercase',
    action='store_true',
    default=False,
    help='whether to use uppercase characters in the vocabulary')
parser.add_argument('--use_large',
                    action='store_true',
                    default=False,
                    help='whether to use the large model')
parser.add_argument('-e',
                    '--num_epochs',
                    type=int,
                    default=10,
                    help='the number of epochs to train')
Code Example #16
File: deepspeech.py Project: rsumner31/ngraph
                          action="store_true",
                          help="Use Nesterov accelerated gradient")

    data_params = parser.add_argument_group("Data Parameters")
    data_params.add_argument('--max_length',
                             type=float,
                             help="max duration for each audio sample",
                             default=7.5)
    data_params.add_argument("--manifest_train",
                             help="Path to training manifest file")
    data_params.add_argument("--manifest_val",
                             help="Path to validation manifest file")

    # TODO: Remove this once testing is further along
    parser.add_argument("--small",
                        action="store_true",
                        help="Use a small version of the model with fake data")
    args = parser.parse_args()

    if args.small:
        args.nfilters = 20
        args.depth = 3
        args.hidden_size = 20
        args.max_length = 0.3
        args.fake = True

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(PBStreamHandler(level=logging.DEBUG))

    # Data parameters
Code Example #17
File: train_model.py Project: cdj0311/nlp-architect
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt
from nlp_architect.models.memn2n_dialogue import MemN2N_Dialog
import numpy as np
import os
from tqdm import tqdm
from utils import interactive_loop
from nlp_architect.utils.io import validate_parent_exists, check_size, validate


# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--task',
    type=int,
    default=1,
    choices=range(1, 7),
    help='the task ID to train/test on from the bAbI-dialog dataset (1-6)')
parser.add_argument(
    '--emb_size',
    type=int,
    default=32,
    help='Size of the word-embedding used in the model.')
parser.add_argument(
    '--nhops',
    type=int,
    default=3,
    help='Number of memory hops in the network',
    choices=range(1, 10))
parser.add_argument(
    '--use_match_type',
Code Example #18
            }
        else:
            for name, res in zip(metric_names, results):
                all_results[name].extend(list(res))

    reduced_results = {
        k: np.mean(v[:dataset._dataloader.ndata])
        for k, v in all_results.items() if k != 'predictions'
    }
    return all_results, reduced_results


parser = NgraphArgparser(description=__doc__)
parser.add_argument('--mini',
                    default=False,
                    dest='mini',
                    action='store_true',
                    help='If given, builds a mini version of Inceptionv3')
parser.add_argument("--image_dir",
                    default='/dataset/aeon/I1K/i1k-extracted/',
                    help="Path to extracted imagenet data")
parser.add_argument("--train_manifest_file",
                    default='train-index-tabbed.csv',
                    help="Name of tab separated Aeon training manifest file")
parser.add_argument("--valid_manifest_file",
                    default='val-index-tabbed.csv',
                    help="Name of tab separated Aeon validation manifest file")
parser.add_argument("--optimizer_name",
                    default='rmsprop',
                    help="Name of optimizer (rmsprop or sgd)")
parser.set_defaults(batch_size=4, num_iterations=10000000, iter_interval=2000)
Code Example #19
import ngraph as ng
from ngraph.frontends.neon import Layer, Sequential, LSTM, Affine
from ngraph.frontends.neon import UniformInit, Tanh, Logistic, Identity, Adam
from ngraph.frontends.neon import NgraphArgparser, loop_train
from ngraph.frontends.neon import make_bound_computation, make_default_callbacks
import ngraph.transformers as ngt
from ngraph.frontends.neon import ArrayIterator
import timeseries
import utils
import imp

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--predict_seq',
                    default=False,
                    dest='predict_seq',
                    action='store_true',
                    help='If given, seq_len future timepoints are predicted')
parser.add_argument('--look_ahead',
                    type=int,
                    help="Number of time steps to start predicting from",
                    default=1)
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--epochs', type=int, help="Number of epochs", default=200)
parser.set_defaults()
args = parser.parse_args()

# Plot the inference / generation results
Code Example #20
File: char_lstm.py Project: ugiwgh/ngraph
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, LSTM, Affine,
                                   Softmax, Tanh, Logistic)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='lstm',
                    choices=['lstm'],
                    help='type of recurrent layer to use (lstm)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 64
time_steps = 50
hidden_size = 128
gradient_clip_value = 5

# download penn treebank
tree_bank_data = PTB(path=args.data_dir)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
Code Example #21
File: lsun_wgan.py Project: rsumner31/ngraph
                            epsilon=1e-8,
                            weight_clip_value=weight_clip_value)

    if loss_type == "WGAN-GP":
        optimizer = Adam(learning_rate=1e-4,
                         beta_1=0.5,
                         beta_2=0.9,
                         epsilon=1e-8,
                         weight_clip_value=weight_clip_value)

    return optimizer


parser = NgraphArgparser(description='WGAN on LSUN bedroom dataset')
parser.add_argument('--plot_interval',
                    type=int,
                    default=500,
                    help='display generated samples at this frequency')
parser.add_argument('--lsun_dir',
                    default="/dataset/lsun",
                    help='LSUN data directory')
parser.add_argument(
    '--subset_pct',
    type=float,
    default=50.0,
    help='subset of training dataset to use (percentage), default 50.0')
parser.add_argument('--loss_type',
                    default='WGAN-GP',
                    help='Loss Function',
                    choices=['WGAN', 'WGAN-GP'])
parser.add_argument('--im_size',
                    type=int,
Code Example #22
File: imdb_rnn.py Project: ami-GS/ngraph
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, BiRNN, Recurrent, Affine,
                                   Softmax, Tanh, LookupTable)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
import ngraph.transformers as ngt

from imdb import IMDB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 128
time_steps = 128
hidden_size = 10
gradient_clip_value = 15
embed_size = 128
vocab_size = 20000
pad_idx = 0

# download IMDB
imdb_dataset = IMDB(path=args.data_dir,
Code Example #23
def default_argparser():
    # parse the command line arguments
    parser = NgraphArgparser(__doc__)
    parser.add_argument(
        '--predict_seq',
        default=False,
        dest='predict_seq',
        action='store_true',
        help='If given, seq_len future timepoints are predicted')
    parser.add_argument('--look_ahead',
                        type=int,
                        help="Number of time steps to start predicting from",
                        default=1)
    parser.add_argument('--seq_len',
                        type=int,
                        help="Number of time points in each input sequence",
                        default=32)
    parser.add_argument(
        '--log_interval',
        type=int,
        default=100,
        help="frequency, in number of iterations, after which loss is evaluated"
    )
    parser.add_argument('--save_plots',
                        action="store_true",
                        help="save plots to disk")
    parser.add_argument('--results_dir',
                        type=str,
                        help="Directory to write results to",
                        default='./')
    parser.add_argument('--resume',
                        type=str,
                        default=None,
                        help="weights of the model to resume training with")
    parser.set_defaults()

    return parser
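
A short usage sketch for the helper above (num_iterations is one of the built-in NgraphArgparser options, as other examples on this page show):

# Sketch: build the shared parser, override a built-in default, then parse.
parser = default_argparser()
parser.set_defaults(num_iterations=20000)
args = parser.parse_args()
print(args.seq_len, args.look_ahead, args.results_dir)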
Code Example #24
from __future__ import print_function
from contextlib import closing
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Layer, Affine, Preprocess, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import ArrayIterator

from ngraph.frontends.neon import CIFAR10
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple CNN on cifar10 dataset')
parser.add_argument('--use_batch_norm',
                    action='store_true',
                    help='whether to use batch normalization')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data,
                          args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10
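
As a hedged note (not part of the original file): make_placeholders() returns a dict of ngraph placeholders keyed by the dataloader's field names (typically 'image' and 'label' for CIFAR10), and setting ax.Y.length = 10 fixes the output axis to the ten CIFAR-10 classes. A quick way to inspect them:

# Sketch: print each placeholder's axes; exact keys depend on the dataloader.
for name, placeholder in inputs.items():
    print(name, placeholder.axes)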
Code Example #25
import ngraph.transformers as ngt
from ngraph.frontends.neon import (Sequential, Deconvolution, Convolution,
                                   Rectlin, Logistic, Tanh, Adam,
                                   ArrayIterator, KaimingInit,
                                   make_bound_computation)
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import MNIST
from ngraph.frontends.neon.logging import ProgressBar
from ngraph.util.names import name_scope
from utils import save_plots, get_image, train_schedule, Noise

# parse command line arguments
parser = NgraphArgparser()
parser.add_argument(
    '--plot_interval',
    type=int,
    default=200,
    help='Save generated images with a period of this many iterations')
parser.add_argument('--loss_type',
                    default='WGAN-GP',
                    help='Loss Function',
                    choices=['DCGAN', 'WGAN', 'WGAN-GP'])
parser.add_argument('--gp_scale',
                    type=int,
                    default=10,
                    help='Scale of the gradient penalty')
parser.add_argument('--w_clip',
                    type=float,
                    default=0.01,
                    help='Weight clipping value for WGAN')
parser.add_argument('--plot_dir',
Code Example #26
File: char_rnn.py Project: jlwhite709/ngraph
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, BiRNN,
                                   Recurrent, Affine, Softmax, Tanh,
                                   LookupTable)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.add_argument('--use_lut',
                    action='store_true',
                    help='choose to use lut as first layer')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 50
time_steps = 150
hidden_size = 500

# download penn treebank
tree_bank_data = PTB(path=args.data_dir)
ptb_data = tree_bank_data.load_data()
Code Example #27
File: train.py Project: rsumner31/ngraph
    embeddings_placeholders = []
    for lut in range(len(data.parameters['dimensions_embeddings'])):
        embedding_placeholder = ng.placeholder(
            ng.make_axes([placeholders['N']]), name="EMB")
        embeddings_placeholders.append(embedding_placeholder)

    placeholders['embeddings_placeholders'] = embeddings_placeholders

    return placeholders


parser = NgraphArgparser(description=__doc__)
parser.add_argument("--learning_rate",
                    type=float,
                    default=0.01,
                    help="Learning rate")
parser.add_argument("--epochs", type=int, default=41, help="Number of epochs")
parser.add_argument(
    "--deep_parameters",
    default='100,50',
    type=str,
    help="Comma-separated list of hidden units in the deep section of the model")
parser.set_defaults(batch_size=40)

args = parser.parse_args()

fc_layers_deep = [int(s) for s in args.deep_parameters.split(',')]

cs_loader = data.CensusDataset(args.batch_size)
Code Example #28
            for name, res in zip(metric_names, results):
                all_results[name].extend(list(res))

    reduced_results = {
        k: np.mean(v[:dataset.ndata])
        for k, v in all_results.items()
    }
    return reduced_results


if __name__ == "__main__":
    parser = NgraphArgparser(
        description='Train deep residual network on cifar10 dataset')
    parser.add_argument(
        '--stage_depth',
        type=int,
        default=2,
        help='depth of each stage (network depth will be 9n+2)')
    parser.add_argument('--use_aeon',
                        action='store_true',
                        help='whether to use aeon dataloader')
    args = parser.parse_args()

    np.random.seed(args.rng_seed)

    # Create the dataloader
    if args.use_aeon:
        from data import make_aeon_loaders
        train_set, valid_set = make_aeon_loaders(args.data_dir,
                                                 args.batch_size,
                                                 args.num_iterations)
Code Example #29
File: train.py Project: Asteur/NervanaNlpApch
                   get_data_array_squad_ngraph, cal_f1_score)
import math
from nlp_architect.contrib.ngraph.weight_initilizers import (make_placeholder,
                                                             make_weights)
from nlp_architect.utils.io import sanitize_path
from nlp_architect.utils.io import validate, validate_existing_directory, \
    validate_existing_filepath, validate_parent_exists, check_size
"""
Training script for reading comprehension model

"""
# parse the command line arguments
parser = NgraphArgparser(__doc__)

parser.add_argument('--data_path',
                    help='enter path for training data',
                    type=str)

parser.add_argument('--gpu_id',
                    default="0",
                    help='enter gpu id',
                    type=str,
                    action=check_size(0, 10))

parser.add_argument('--max_para_req',
                    default=100,
                    help='enter the max length of paragraph',
                    type=int,
                    action=check_size(30, 300))

parser.add_argument('--batch_size_squad',