Code Example #1
                           preprocess=True))
    else:
        fprop_computation_op = ng.computation(model_out, "all")

        benchmark_fprop = Benchmark(fprop_computation_op, train_set, inputs,
                                    args.backend, args.hetr_device)
        Benchmark.print_benchmark_results(
            benchmark_fprop.time(args.num_iterations,
                                 args.skip_iter,
                                 'ds2_fprop',
                                 args.visualize,
                                 preprocess=True))


if __name__ == "__main__":
    parser = NgraphArgparser(description='Train mini deep speech 2')
    parser.add_argument(
        '--nfilters',
        type=int,
        help='Number of convolutional filters in the first layer',
        default=2)
    parser.add_argument('--filter_width',
                        type=int,
                        help='Width of 1D convolutional filters',
                        default=11)
    parser.add_argument('--str_w', type=int, help='Stride in time', default=1)
    parser.add_argument('--depth',
                        type=int,
                        help='Number of RNN layers',
                        default=1)
    parser.add_argument(
Code Example #2
File: char_rnn.py Project: jlwhite709/ngraph
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, BiRNN,
                                   Recurrent, Affine, Softmax, Tanh,
                                   LookupTable)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.add_argument('--use_lut',
                    action='store_true',
                    help='use a lookup table (LUT) as the first layer')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 50
time_steps = 150
hidden_size = 500
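
A minimal sketch, not part of the original char_rnn.py, of how the parsed
--layer_type flag might select between the imported Recurrent and BiRNN
layers; the initializer bounds are assumptions:

# hedged sketch: choose the recurrent layer from the parsed flag
init = UniformInit(low=-0.08, high=0.08)  # assumed bounds
if args.layer_type == 'rnn':
    rlayer = Recurrent(hidden_size, init, activation=Tanh())
else:  # 'birnn'
    rlayer = BiRNN(hidden_size, init, activation=Tanh())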
Code Example #3
Reference paper: https://arxiv.org/pdf/1506.03134.pdf
"""
from __future__ import division
from __future__ import print_function
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import UniformInit, RMSProp, ax, Tanh, Logistic
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation
from ngraph.frontends.neon import LSTM
import ngraph.transformers as ngt
from ngraph.frontends.neon.data.tsp import TSP
from tsp_seqarrayiter import TSPSequentialArrayIterator
from utils import save_plot

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--train_file', default='tsp5.txt',
                    choices=['tsp5.txt', 'tsp10.txt'],
                    help='specify training filename')
parser.add_argument('--test_file', default='tsp5_test.txt',
                    choices=['tsp5_test.txt', 'tsp10_test.txt'],
                    help='specify testing filename')
parser.add_argument('--lr', type=float, default=0.0025, help='learning rate')
parser.add_argument('--hs', type=int, default=256, help='hidden unit size')
# note: argparse's type=bool treats any non-empty string (even "False") as
# True, so parse the value explicitly instead
parser.add_argument('--emb', type=lambda s: s.lower() in ('true', '1'),
                    default=True, help='use embedding (true/false)')
parser.set_defaults()
args = parser.parse_args()
args.batch_size = 128
args.num_iterations = 20000

gradient_clip_value = 2
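
The lambda passed to --emb above replaces the original type=bool, a common
argparse pitfall; this standalone check, added for illustration, shows why:

# bool() on any non-empty string is True, so under type=bool the command
# line "--emb False" would still enable the embedding
assert bool('False') is True
assert bool('') is False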
Code Example #4
        benchmark = Benchmark(batch_cost_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_bprop',
                           visualize, 'device_id'))
    else:
        fprop_computation_op = ng.computation(model_out, 'all')
        benchmark = Benchmark(fprop_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_fprop',
                           visualize))


if __name__ == "__main__":
    parser = NgraphArgparser(description='Train deep residual network')
    parser.add_argument('-data',
                        '--data_set',
                        default='cifar10',
                        choices=['cifar10', 'i1k'],
                        help="data set name")
    parser.add_argument('-s',
                        '--skip_iter',
                        type=int,
                        default=1,
                        help="number of iterations to skip")
    parser.add_argument('-m',
                        '--num_devices',
                        nargs='+',
                        type=int,
                        default=[1],
Code Example #5
File: char_rae.py Project: psdurley/ngraph
"""

import numpy as np
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import Layer, Preprocess, Recurrent, Affine, Softmax, Tanh
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.set_defaults(batch_size=128, num_iterations=2000)
args = parser.parse_args()

# model parameters
time_steps = 5
hidden_size = 256
gradient_clip_value = 5

# download penn treebank
# set shift_target to be False, since it is going to predict the same sequence
tree_bank_data = PTB(path=args.data_dir, shift_target=False)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
Code Example #6
File: mnist_dcgan.py Project: ugiwgh/ngraph
            self.cnt += 1
            return np.random.normal(loc=self.mean,
                                    scale=self.std,
                                    size=self.shape)
        else:
            raise StopIteration

    def next(self):
        return self.__next__()

    def __iter__(self):
        return self


# parse command line arguments
parser = NgraphArgparser()
parser.add_argument(
    '--plot_interval',
    type=int,
    default=200,
    help='save generated images with a period of this many iterations')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
np.random.seed(args.rng_seed)  # note: uses NgraphArgparser's built-in --rng_seed, not the --seed flag above

args.batch_size = 32

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size)
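
For context, a hypothetical self-contained reconstruction of the truncated
noise iterator whose tail appears above; the constructor arguments are
assumptions:

import numpy as np

class Noise(object):
    """Yield Gaussian noise batches for a fixed number of iterations."""

    def __init__(self, shape, mean=0.0, std=1.0, num_iter=None):
        self.shape = shape        # e.g. (batch_size, noise_dim); assumed
        self.mean = mean
        self.std = std
        self.num_iter = num_iter  # None means iterate forever
        self.cnt = 0

    def __next__(self):
        if self.num_iter is None or self.cnt < self.num_iter:
            self.cnt += 1
            return np.random.normal(loc=self.mean, scale=self.std,
                                    size=self.shape)
        else:
            raise StopIteration

    def next(self):               # Python 2 compatibility, as in the original
        return self.__next__()

    def __iter__(self):
        return self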
Code Example #7
from __future__ import division, print_function
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import Layer, Sequential, LSTM, Affine
from ngraph.frontends.neon import UniformInit, Tanh, Logistic, Identity, Adam
from ngraph.frontends.neon import NgraphArgparser, loop_train
from ngraph.frontends.neon import make_bound_computation, make_default_callbacks
import ngraph.transformers as ngt
from ngraph.frontends.neon import ArrayIterator
import timeseries
import utils
import imp

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--predict_seq',
                    default=False,
                    dest='predict_seq',
                    action='store_true',
                    help='If given, seq_len future timepoints are predicted')
parser.add_argument('--look_ahead',
                    type=int,
                    help="Number of time steps to start predicting from",
                    default=1)
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--epochs', type=int, help="Number of epochs", default=200)
parser.set_defaults()
Code Example #8
usage: python toy_wgan.py -b gpu -t 100000
"""
from contextlib import closing
import ngraph.transformers as ngt
from ngraph.frontends.neon import Adam, Affine, Rectlin, Sequential
from ngraph.frontends.neon import ConstantInit, KaimingInit
from ngraph.frontends.neon import make_bound_computation, NgraphArgparser
from ngraph.frontends.neon.logging import ProgressBar
import ngraph as ng
import os
import numpy as np
from toy_utils import DataGenerator, NormalNoise, generate_plot


parser = NgraphArgparser()
parser.add_argument('--plot_interval', type=int, default=200,
                    help='Plot results every this many iterations')
parser.add_argument('--loss_type', type=str, default='WGAN-GP',
                    help='Choose loss type', choices=['WGAN-GP', 'WGAN'])
parser.add_argument('--gp_scale', type=int, default=1,
                    help='Scale of the gradient penalty')
parser.add_argument('--w_clip', type=float, default=0.01,  # float, not int: clip values are fractional
                    help='Weight clipping value for WGAN')
parser.add_argument('--data_type', type=str, default='Roll',
                    help='Choose ground truth distribution',
                    choices=['Rectangular', 'Circular', 'Roll'])
parser.add_argument('--dim', type=int, default=512,
                    help='Hidden layer dimension for the model')
parser.add_argument('--num_critic', type=int, default=5,
                    help='Number of discriminator iterations per generator iteration')
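
A hedged sketch, not from toy_wgan.py itself, of how these flags typically
branch: plain WGAN clips critic weights to [-w_clip, w_clip], while WGAN-GP
drops clipping in favor of a gradient penalty scaled by gp_scale:

args = parser.parse_args()
if args.loss_type == 'WGAN':
    weight_clip_value = args.w_clip  # enforce the Lipschitz constraint by clipping
    gp_scale = None
else:  # 'WGAN-GP'
    weight_clip_value = None         # rely on the gradient penalty instead
    gp_scale = args.gp_scale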
Code Example #9
File: train_model.py Project: cdj0311/nlp-architect
from ngraph.frontends.neon import GaussianInit, Adam
from ngraph.frontends.neon import make_bound_computation
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt
from nlp_architect.models.memn2n_dialogue import MemN2N_Dialog
import numpy as np
import os
from tqdm import tqdm
from utils import interactive_loop
from nlp_architect.utils.io import validate_parent_exists, check_size, validate


# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--task',
    type=int,
    default=1,
    choices=range(1, 7),
    help='the task ID to train/test on from bAbI-dialog dataset (1-6)')
parser.add_argument(
    '--emb_size',
    type=int,
    default=32,
    help='Size of the word-embedding used in the model.')
parser.add_argument(
    '--nhops',
    type=int,
    default=3,
Code Example #10
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import ngraph as ng
from ngraph.frontends.neon import Sequential, Preprocess, Recurrent, Affine, Softmax, Tanh
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ptb import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.set_defaults(gen_be=False)
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 50
time_steps = 10
hidden_size = 20
gradient_clip_value = 15

# download penn treebank
tree_bank_data = PTB(path=args.data_dir)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
Code Example #11
File: char_conv.py Project: rsumner31/ngraph
        Affine(nout=fc_nout,
               weight_init=init,
               bias_init=ConstantInit(0.),
               activation=Rectlin()))
    layers.append(Dropout(keep=0.5))
    layers.append(
        Affine(axes=(ax.Y, ),
               weight_init=init,
               bias_init=ConstantInit(0.),
               activation=Softmax()))

    return layers


# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--sentence_length',
                    type=int,
                    default=1014,
                    help='the number of characters in a sentence')
parser.add_argument(
    '--use_uppercase',
    action='store_true',
    default=False,
    help='whether to use uppercase characters in the vocabulary')
parser.add_argument('--use_large',
                    action='store_true',
                    default=False,
                    help='whether to use the large model')
parser.add_argument('-e',
                    '--num_epochs',
Code Example #12
        gen_txt.append(pred_char)

    # Convert integer index of tokens to actual tokens
    gen_txt = [index_to_token[i] for i in gen_txt]
    return gen_txt


def expand_onehot(x):
    """
    Simply converts an integer to a one-hot vector of the same size as out_axis
    """
    return ng.one_hot(x, axis=out_axis)


# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--use_embedding',
    default=False,
    dest='use_embedding',
    action='store_true',
    help='If given, embedding layer is used as the first layer')
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--recurrent_units',
                    type=int,
                    help="Number of recurrent units in the network",
                    default=256)
parser.set_defaults(num_iterations=20000)
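
A tiny worked example, added for illustration, of the index-to-token
decoding used in the generation loop above:

index_to_token = {0: 'h', 1: 'i', 2: '!'}  # hypothetical vocabulary
gen_txt = [index_to_token[i] for i in [0, 1, 2]]
assert ''.join(gen_txt) == 'hi!'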
Code Example #13
import ngraph as ng
from ngraph.frontends.neon import Layer
from ngraph.frontends.neon import Adam
from ngraph.frontends.neon import make_bound_computation
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt

from nlp_architect.models.kvmemn2n import KVMemN2N
from nlp_architect.data.wikimovies import WIKIMOVIES
from nlp_architect.utils.io import validate_parent_exists, check_size
from examples.kvmemn2n.interactive_util import interactive_loop

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--emb_size',
    type=int,
    default=50,
    help='Size of the word-embedding used in the model. (default 50)',
    action=check_size(1, 20000))
parser.add_argument('--nhops',
                    type=int,
                    default=3,
                    help='Number of memory hops in the network',
                    action=check_size(1, 20))
parser.add_argument('--lr',
                    type=float,
                    default=0.01,
                    help='learning rate',
Code Example #14
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Affine, Preprocess, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator

from cifar10 import CIFAR10
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple CNN on cifar10 dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data,
                          args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)
######################
# Model specification


def cifar_mean_subtract(x):
Code Example #15
File: lsun_wgan.py Project: rsumner31/ngraph
        optimizer = RMSProp(learning_rate=5e-5,
                            decay_rate=0.99,
                            epsilon=1e-8,
                            weight_clip_value=weight_clip_value)

    if loss_type == "WGAN-GP":
        optimizer = Adam(learning_rate=1e-4,
                         beta_1=0.5,
                         beta_2=0.9,
                         epsilon=1e-8,
                         weight_clip_value=weight_clip_value)

    return optimizer


parser = NgraphArgparser(description='WGAN on LSUN bedroom dataset')
parser.add_argument('--plot_interval',
                    type=int,
                    default=500,
                    help='display generated samples at this frequency')
parser.add_argument('--lsun_dir',
                    default="/dataset/lsun",
                    help='LSUN data directory')
parser.add_argument(
    '--subset_pct',
    type=float,
    default=50.0,
    help='subset of training dataset to use (percentage), default 50.0')
parser.add_argument('--loss_type',
                    default='WGAN-GP',
                    help='Loss Function',
Code Example #16
File: mnist_mlp.py Project: rsumner31/ngraph
from contextlib import closing
import os
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Layer, Affine, Preprocess, Sequential
from ngraph.frontends.neon import GaussianInit, Rectlin, Logistic, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import loop_eval
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator

from ngraph.frontends.neon import MNIST
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple mlp on mnist dataset')
parser.add_argument('--save_file',
                    type=str,
                    default=None,
                    help="File to save weights")
parser.add_argument('--load_file',
                    type=str,
                    default=None,
                    help="File to load weights")
parser.add_argument('--inference',
                    action="store_true",
                    help="Run inference with the loaded weights")
args = parser.parse_args()

if args.inference and (args.load_file is None):
    print("Need to set --load_file for Inference problem")
Code Example #17
        environment = DimShuffleWrapper(environment)

    # todo: perhaps these should be defined in the environment itself
    state_axes = ng.make_axes([
        ng.make_axis(environment.observation_space.shape[0], name='C'),
        ng.make_axis(environment.observation_space.shape[1], name='H'),
        ng.make_axis(environment.observation_space.shape[2], name='W'),
    ])

    agent = dqn.Agent(
        state_axes,
        environment.action_space,
        model=model,
        epsilon=dqn.linear_generator(start=1.0, end=0.1, steps=1000000),
        gamma=0.99,
        learning_rate=0.00025,
        memory=dqn.Memory(maxlen=1000000),
        target_network_update_frequency=1000,
        learning_starts=10000,
    )

    rl_loop.rl_loop_train(environment, agent, episodes=200000)


if __name__ == "__main__":
    from ngraph.frontends.neon import NgraphArgparser

    parser = NgraphArgparser()
    parser.parse_args()
    main()
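
dqn.linear_generator is not shown on this page; a plausible standalone
equivalent, assuming it anneals epsilon linearly from start to end over the
given number of steps and then holds the final value:

def linear_generator(start, end, steps):
    """Yield `steps` linearly spaced values from start to end, then hold end."""
    for i in range(steps):
        yield start + (end - start) * i / float(max(steps - 1, 1))
    while True:
        yield end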
Code Example #18
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Affine, Preprocess, Sequential
from ngraph.frontends.neon import GaussianInit, Rectlin, Logistic, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator

from mnist import MNIST
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple mlp on mnist dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size, total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
# Model specification
seq1 = Sequential([Preprocess(functor=lambda x: x / 255.),
Code Example #19
def default_argparser():
    # parse the command line arguments
    parser = NgraphArgparser(__doc__)
    parser.add_argument(
        '--predict_seq',
        default=False,
        dest='predict_seq',
        action='store_true',
        help='If given, seq_len future timepoints are predicted')
    parser.add_argument('--look_ahead',
                        type=int,
                        help="Number of time steps to start predicting from",
                        default=1)
    parser.add_argument('--seq_len',
                        type=int,
                        help="Number of time points in each input sequence",
                        default=32)
    parser.add_argument(
        '--log_interval',
        type=int,
        default=100,
        help="frequency, in number of iterations, after which loss is evaluated"
    )
    parser.add_argument('--save_plots',
                        action="store_true",
                        help="save plots to disk")
    parser.add_argument('--results_dir',
                        type=str,
                        help="Directory to write results to",
                        default='./')
    parser.add_argument('--resume',
                        type=str,
                        default=None,
                        help="weights of the model to resume training with")
    parser.set_defaults()

    return parser
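
A short usage sketch: the factory returns a preconfigured NgraphArgparser
that a calling script can extend before parsing (the --hidden option here is
hypothetical):

parser = default_argparser()
parser.add_argument('--hidden', type=int, default=64,
                    help='hypothetical extra option added by the caller')
args = parser.parse_args()
print(args.seq_len, args.look_ahead)  # defaults: 32, 1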
Code Example #20
            all_results = {
                name: list(np.transpose(res))
                for name, res in zip(metric_names, results)
            }
        else:
            for name, res in zip(metric_names, results):
                all_results[name].extend(list(res))

    reduced_results = {
        k: np.mean(v[:dataset._dataloader.ndata])
        for k, v in all_results.items() if k != 'predictions'
    }
    return all_results, reduced_results


parser = NgraphArgparser(description=__doc__)
parser.add_argument('--mini',
                    default=False,
                    dest='mini',
                    action='store_true',
                    help='If given, builds a mini version of Inceptionv3')
parser.add_argument("--image_dir",
                    default='/dataset/aeon/I1K/i1k-extracted/',
                    help="Path to extracted imagenet data")
parser.add_argument("--train_manifest_file",
                    default='train-index-tabbed.csv',
                    help="Name of tab separated Aeon training manifest file")
parser.add_argument("--valid_manifest_file",
                    default='val-index-tabbed.csv',
                    help="Name of tab separated Aeon validation manifest file")
parser.add_argument("--optimizer_name",
Code Example #21
from contextlib import closing
import ngraph as ng
import os
import ngraph.transformers as ngt
from ngraph.frontends.neon import (Sequential, Deconvolution, Convolution,
                                   Rectlin, Logistic, Tanh, Adam,
                                   ArrayIterator, KaimingInit,
                                   make_bound_computation)
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import MNIST
from ngraph.frontends.neon.logging import ProgressBar
from ngraph.util.names import name_scope
from utils import save_plots, get_image, train_schedule, Noise

# parse command line arguments
parser = NgraphArgparser()
parser.add_argument(
    '--plot_interval',
    type=int,
    default=200,
    help='Save generated images with a period of this many iterations')
parser.add_argument('--loss_type',
                    default='WGAN-GP',
                    help='Loss Function',
                    choices=['DCGAN', 'WGAN', 'WGAN-GP'])
parser.add_argument('--gp_scale',
                    type=int,
                    default=10,
                    help='Scale of the gradient penalty')
parser.add_argument('--w_clip',
                    type=float,  # float, not int: weight-clip values such as 0.01 are fractional
Code Example #22
File: char_lstm.py Project: ugiwgh/ngraph
"""
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, LSTM, Affine,
                                   Softmax, Tanh, Logistic)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='lstm',
                    choices=['lstm'],
                    help='type of recurrent layer to use (lstm)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 64
time_steps = 50
hidden_size = 128
gradient_clip_value = 5

# download penn treebank
tree_bank_data = PTB(path=args.data_dir)
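
The excerpt cuts off here; judging from the other PTB examples on this page,
the next lines presumably wrap the loaded corpus in a
SequentialArrayIterator, roughly:

ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps)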
Code Example #23
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Layer, Affine, Preprocess, Convolution, Pooling, Sequential
from ngraph.frontends.neon import XavierInit, Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import ArrayIterator

from ngraph.frontends.neon import MNIST
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train LeNet topology on Mnist dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data,
                          args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
Code Example #24
File: imdb_rnn.py Project: ami-GS/ngraph
from __future__ import division
from __future__ import print_function
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, BiRNN, Recurrent, Affine,
                                   Softmax, Tanh, LookupTable)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
import ngraph.transformers as ngt

from imdb import IMDB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 128
time_steps = 128
hidden_size = 10
gradient_clip_value = 15
embed_size = 128
vocab_size = 20000
pad_idx = 0
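
An illustration, added here and not part of imdb_rnn.py, of what pad_idx = 0
implies: reviews shorter than time_steps are padded with token id 0:

import numpy as np
review = [5, 42, 7]  # hypothetical token ids
padded = np.full(time_steps, pad_idx, dtype=np.int32)
padded[:len(review)] = review  # tokens first, zero padding after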
Code Example #25
File: train.py Project: rsumner31/ngraph
        axes=[placeholders['WF'], placeholders['N']], name="X_w")
    placeholders['Y'] = ng.placeholder(axes=[placeholders['N']], name="Y")

    embeddings_placeholders = []
    for lut in range(len(data.parameters['dimensions_embeddings'])):
        embedding_placeholder = ng.placeholder(ng.make_axes(
            [placeholders['N']]),
                                               name="EMB")
        embeddings_placeholders.append(embedding_placeholder)

    placeholders['embeddings_placeholders'] = embeddings_placeholders

    return placeholders


parser = NgraphArgparser(description=__doc__)
parser.add_argument("--learning_rate",
                    type=float,
                    default=0.01,
                    help="Learning rate")
parser.add_argument("--epochs", type=int, default=41, help="Number of epochs")
parser.add_argument(
    "--deep_parameters",
    default='100,50',
    type=str,
    help="Comma separated list of hidden neurons on the deep section of the model")
parser.set_defaults(batch_size=40)

args = parser.parse_args()
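
A hedged sketch of how the comma-separated --deep_parameters string is
presumably consumed later in the script:

deep_sizes = [int(n) for n in args.deep_parameters.split(',')]
# with the default '100,50' this yields [100, 50], one entry per hidden layer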
Code Example #26
"""
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Layer, Affine, Preprocess, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import ArrayIterator

from ngraph.frontends.neon import CIFAR10
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple CNN on cifar10 dataset')
parser.add_argument('--use_batch_norm',
                    action='store_true',
                    help='whether to use batch normalization')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data,
                          args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
Code Example #27
File: train.py Project: cdj0311/nlp-architect
    max_values_squad,
    get_data_array_squad_ngraph,
    cal_f1_score)
import math
from nlp_architect.contrib.ngraph.weight_initilizers import (make_placeholder, make_weights)
from nlp_architect.utils.io import sanitize_path
from nlp_architect.utils.io import validate, validate_existing_directory, \
    validate_existing_filepath, validate_parent_exists, check_size


"""
Training script for reading comprehension model

"""
# parse the command line arguments
parser = NgraphArgparser(__doc__)

parser.add_argument('--data_path', help='enter path for training data',
                    type=str)

parser.add_argument('--gpu_id', default="0", help='enter gpu id',
                    type=str, action=check_size(0, 10))

parser.add_argument('--max_para_req', default=100, help='enter the max length of paragraph',
                    type=int, action=check_size(30, 300))

parser.add_argument('--batch_size_squad', default=16, help='enter the batch size',
                    type=int, action=check_size(1, 256))

parser.set_defaults()
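
A plausible continuation, assuming the io helpers imported above validate the
user-supplied path before any data is loaded:

args = parser.parse_args()
if args.data_path is not None:
    args.data_path = sanitize_path(args.data_path)  # assumed usage of the imported helper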
Code Example #28
import numpy as np
import ngraph as ng
import ngraph.transformers as ngt
from tqdm import tqdm
from contextlib import closing

from ngraph.frontends.neon import NgraphArgparser, ArrayIterator
from ngraph.frontends.neon import GaussianInit, UniformInit
from ngraph.frontends.neon import Affine, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax

np.seterr(all='raise')

parser = NgraphArgparser(
    description='Train convnet-vgg_a model on random dataset')
# Default batch_size for convnet-vgg_a is 64
parser.set_defaults(batch_size=64, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 224
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('batch', 'C', 'height', 'width')
    },
    'label': {
Code Example #29
        noise_samples = self.noise_samples(self.batch_size, self.num_iter)

        self.train_set = {
            'data_sample': {
                'data': data_samples,
                'axes': ('batch', 'sample')
            },
            'noise_sample': {
                'data': noise_samples,
                'axes': ('batch', 'sample')
            }
        }
        return self.train_set


parser = NgraphArgparser(description='MLP GAN example')
args = parser.parse_args()

#  model parameters
h_dim = 4
minibatch_discrimination = False
num_iterations = 600
batch_size = 12
num_examples = num_iterations * batch_size

# generator
generator_layers = [
    affine_layer(h_dim, Rectlin(), name='g0'),
    affine_layer(1, Identity(), name='g1')
]
generator = Sequential(generator_layers)
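
For symmetry, a hypothetical discriminator built in the same style; the
affine_layer helper, Tanh, and Logistic come from earlier in the original
script and are assumed here:

discriminator_layers = [
    affine_layer(2 * h_dim, Tanh(), name='d0'),
    affine_layer(2 * h_dim, Tanh(), name='d1'),
    affine_layer(1, Logistic(), name='d2')
]
discriminator = Sequential(discriminator_layers)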
Code Example #30
File: dist_hetr.py Project: rsumner31/ngraph
1. run `python dist_hetr.py --graph_vis`

2. run `tensorboard --logdir /tmp/hetr_tb/ --port 6006`

use ssh port forwarding to run on remote server
https://stackoverflow.com/questions/37987839/how-can-i-run-tensorboard-on-a-remote-server
"""
from __future__ import print_function
from contextlib import closing
import ngraph as ng
import ngraph.transformers as ngt
from ngraph.frontends.neon import NgraphArgparser
import numpy as np

# Command Line Parser
parser = NgraphArgparser(description="Distributed HeTr Example")
parser.add_argument("--graph_vis",
                    action="store_true",
                    help="enable graph visualization")
args = parser.parse_args()

# Build the graph
H = ng.make_axis(length=6, name='height')
N = ng.make_axis(length=8, name='batch')
W1 = ng.make_axis(length=2, name='W1')
W2 = ng.make_axis(length=4, name='W2')
x = ng.placeholder(axes=[H, N])
w1 = ng.placeholder(axes=[W1, H])
w2 = ng.placeholder(axes=[W2, W1])
with ng.metadata(device_id=('0', '1'), parallel=N):
    dot1 = ng.dot(w1, x).named("dot1")
Code Example #31
File: deepspeech.py Project: rsumner31/ngraph
        Tuple of (ground truth transcript, decoded transcript, CER)
    """
    ground_truth = decoder.process_string(decoder.convert_to_string(inds),
                                          remove_repetitions=False)
    decoded_string = decoder.decode(probs)
    cer = decoder.cer(ground_truth, decoded_string) / float(len(ground_truth))

    return ground_truth, decoded_string, cer


if __name__ == "__main__":
    import logging
    from ngraph.frontends.neon.logging import ProgressBar, PBStreamHandler
    from ngraph.frontends.neon import NgraphArgparser

    parser = NgraphArgparser()
    structure = parser.add_argument_group("Network Structure")
    structure.add_argument(
        '--nfilters',
        type=int,
        help='Number of convolutional filters in the first layer',
        default=256)
    structure.add_argument('--filter_width',
                           type=int,
                           help='Width of 1D convolutional filters',
                           default=11)
    structure.add_argument('--str_w',
                           type=int,
                           help='Stride in time',
                           default=3)
    structure.add_argument('--depth',
Code Example #32
                name: list(res)
                for name, res in zip(metric_names, results)
            }
        else:
            for name, res in zip(metric_names, results):
                all_results[name].extend(list(res))

    reduced_results = {
        k: np.mean(v[:dataset.ndata])
        for k, v in all_results.items()
    }
    return reduced_results


if __name__ == "__main__":
    parser = NgraphArgparser(
        description='Train deep residual network on cifar10 dataset')
    parser.add_argument(
        '--stage_depth',
        type=int,
        default=2,
        help='depth of each stage (network depth will be 9n+2)')
    parser.add_argument('--use_aeon',
                        action='store_true',
                        help='whether to use aeon dataloader')
    args = parser.parse_args()

    np.random.seed(args.rng_seed)

    # Create the dataloader
    if args.use_aeon:
        from data import make_aeon_loaders
Code Example #33
File: interactive.py Project: cdj0311/nlp-architect
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import Layer
from ngraph.frontends.neon import GaussianInit, Adam
from ngraph.frontends.neon import make_bound_computation
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
from ngraph.frontends.neon import Saver
import ngraph.transformers as ngt
from nlp_architect.data.babi_dialog import BABI_Dialog
from nlp_architect.models.memn2n_dialogue import MemN2N_Dialog
from utils import interactive_loop
from nlp_architect.utils.io import validate_existing_filepath, validate_parent_exists, validate

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--task',
    type=int,
    default=1,
    choices=range(1, 7),
    help='the task ID to train/test on from bAbI-dialog dataset (1-6)')
parser.add_argument(
    '--emb_size',
    type=int,
    default=32,
    help='Size of the word-embedding used in the model. (default 32)')
parser.add_argument(
    '--nhops',
    type=int,
    default=3,