Code Example #1
from ngraph.frontends.neon import NgraphArgparser


def default_argparser():
    # parse the command line arguments
    parser = NgraphArgparser(__doc__)
    parser.add_argument(
        '--predict_seq',
        default=False,
        dest='predict_seq',
        action='store_true',
        help='If given, seq_len future timepoints are predicted')
    parser.add_argument('--look_ahead',
                        type=int,
                        help="Number of time steps to start predicting from",
                        default=1)
    parser.add_argument('--seq_len',
                        type=int,
                        help="Number of time points in each input sequence",
                        default=32)
    parser.add_argument(
        '--log_interval',
        type=int,
        default=100,
        help="frequency, in number of iterations, after which loss is evaluated"
    )
    parser.add_argument('--save_plots',
                        action="store_true",
                        help="save plots to disk")
    parser.add_argument('--results_dir',
                        type=str,
                        help="Directory to write results to",
                        default='./')
    parser.add_argument('--resume',
                        type=str,
                        default=None,
                        help="weights of the model to resume training with")
    parser.set_defaults()

    return parser
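A minimal usage sketch (not part of the excerpt): the helper returns the parser, so a caller parses the command line and reads the options defined above.

args = default_argparser().parse_args()
print(args.seq_len, args.look_ahead, args.predict_seq)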
Code Example #2
File: dist_hetr.py, Project: rsumner31/ngraph
1. run `python dist_hetr.py --graph_vis`

2. run `tensorboard --logdir /tmp/hetr_tb/ --port 6006`

Use SSH port forwarding to view TensorBoard when running on a remote server:
https://stackoverflow.com/questions/37987839/how-can-i-run-tensorboard-on-a-remote-server
"""
from __future__ import print_function
from contextlib import closing
import ngraph as ng
import ngraph.transformers as ngt
from ngraph.frontends.neon import NgraphArgparser
import numpy as np

# Command Line Parser
parser = NgraphArgparser(description="Distributed HeTr Example")
parser.add_argument("--graph_vis",
                    action="store_true",
                    help="enable graph visualization")
args = parser.parse_args()

# Build the graph
H = ng.make_axis(length=6, name='height')
N = ng.make_axis(length=8, name='batch')
W1 = ng.make_axis(length=2, name='W1')
W2 = ng.make_axis(length=4, name='W2')
x = ng.placeholder(axes=[H, N])
w1 = ng.placeholder(axes=[W1, H])
w2 = ng.placeholder(axes=[W2, W1])
with ng.metadata(device_id=('0', '1'), parallel=N):
    dot1 = ng.dot(w1, x).named("dot1")
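A hedged sketch of executing the graph above (assumed, not in the excerpt): the 'hetr' transformer name follows ngraph's make_transformer_factory convention, and the feed shapes match the axes declared above.

# Sketch only: run dot1 across the two devices named in ng.metadata.
with closing(ngt.make_transformer_factory('hetr')()) as transformer:
    dot1_fun = transformer.computation(dot1, x, w1)
    result = dot1_fun(np.random.rand(6, 8), np.random.rand(2, 6))
    print(result.shape)  # axes (W1, N) -> (2, 8)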
Code Example #3
File: char_rnn.py, Project: jlwhite709/ngraph
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, BiRNN,
                                   Recurrent, Affine, Softmax, Tanh,
                                   LookupTable)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.add_argument('--use_lut',
                    action='store_true',
                    help='use a lookup table (LUT) as the first layer')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 50
time_steps = 150
hidden_size = 500
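A hedged sketch of how the --layer_type flag parsed above is typically consumed; it uses only names imported in this example, but the initializer range and layer arguments are assumptions, not the original file's values.

# Sketch only: choose the recurrent layer from the parsed flag.
init = UniformInit(low=-0.08, high=0.08)
if args.layer_type == 'rnn':
    rlayer = Recurrent(hidden_size, init, activation=Tanh())
else:  # 'birnn'
    rlayer = BiRNN(hidden_size, init, activation=Tanh())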
Code Example #4
import numpy as np
import ngraph as ng
import ngraph.transformers as ngt
from tqdm import tqdm
from contextlib import closing

from ngraph.frontends.neon import NgraphArgparser, ArrayIterator
from ngraph.frontends.neon import GaussianInit, UniformInit
from ngraph.frontends.neon import Affine, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax

np.seterr(all='raise')

parser = NgraphArgparser(
    description='Train convnet-vgg_a model on random dataset')
# Default batch_size for convnet-vgg_a is 64
parser.set_defaults(batch_size=64, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 224
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('batch', 'C', 'height', 'width')
    },
    'label': {
        'data': y_train,
        'axes': ('batch',)
    }
}
Code Example #5
                           preprocess=True))
    else:
        fprop_computation_op = ng.computation(model_out, "all")

        benchmark_fprop = Benchmark(fprop_computation_op, train_set, inputs,
                                    args.backend, args.hetr_device)
        Benchmark.print_benchmark_results(
            benchmark_fprop.time(args.num_iterations,
                                 args.skip_iter,
                                 'ds2_fprop',
                                 args.visualize,
                                 preprocess=True))


if __name__ == "__main__":
    parser = NgraphArgparser(description='Train mini deep speech 2')
    parser.add_argument(
        '--nfilters',
        type=int,
        help='Number of convolutional filters in the first layer',
        default=2)
    parser.add_argument('--filter_width',
                        type=int,
                        help='Width of 1D convolutional filters',
                        default=11)
    parser.add_argument('--str_w', type=int, help='Stride in time', default=1)
    parser.add_argument('--depth',
                        type=int,
                        help='Number of RNN layers',
                        default=1)
    parser.add_argument(
Code Example #6
        benchmark = Benchmark(batch_cost_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_bprop',
                           visualize, 'device_id'))
    else:
        fprop_computation_op = ng.computation(model_out, 'all')
        benchmark = Benchmark(fprop_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_fprop',
                           visualize))


if __name__ == "__main__":
    parser = NgraphArgparser(description='Train deep residual network')
    parser.add_argument('-data',
                        '--data_set',
                        default='cifar10',
                        choices=['cifar10', 'i1k'],
                        help="data set name")
    parser.add_argument('-s',
                        '--skip_iter',
                        type=int,
                        default=1,
                        help="number of iterations to skip")
    parser.add_argument('-m',
                        '--num_devices',
                        nargs='+',
                        type=int,
                        default=[1],
Code Example #7
File: mnist_dcgan.py, Project: ugiwgh/ngraph
            self.cnt += 1
            return np.random.normal(loc=self.mean,
                                    scale=self.std,
                                    size=self.shape)
        else:
            raise StopIteration

    def next(self):
        return self.__next__()

    def __iter__(self):
        return self


# parse command line arguments
parser = NgraphArgparser()
parser.add_argument(
    '--plot_interval',
    type=int,
    default=200,
    help='interval, in iterations, at which generated images are saved')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
np.random.seed(args.rng_seed)  # --rng_seed is a default option added by NgraphArgparser

args.batch_size = 32

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size)
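A hedged continuation (assumed): as in Examples #9 and #11, the iterator can then supply the graph placeholders for the model inputs.

inputs = train_set.make_placeholders()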
Code Example #8
File: train_resnet.py, Project: rsumner31/ngraph
    # Hyperparameters
    # Optimizer
    base_lr = 0.1
    gamma = 0.1
    momentum_coef = 0.9
    wdecay = 0.0001
    nesterov = False

    print("HyperParameters")
    print("Learning Rate:     " + str(base_lr))
    print("Momentum:          " + str(momentum_coef))
    print("Weight Decay:      " + str(wdecay))
    print("Nesterov:          " + str(nesterov))

    # Command Line Parser
    parser = NgraphArgparser(description="Resnet for Imagenet and Cifar10")
    parser.add_argument('--dataset',
                        type=str,
                        default="cifar10",
                        help="dataset to use: cifar10 or i1k")
    parser.add_argument('--size',
                        type=int,
                        default=56,
                        help="size (depth) of the resnet")
    parser.add_argument('--tb',
                        action="store_true",
                        help="enable tensorboard logging")
    parser.add_argument('--logfile',
                        type=str,
                        default=None,
                        help="Name of the csv which \
Code Example #9
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Affine, Preprocess, Sequential
from ngraph.frontends.neon import GaussianInit, Rectlin, Logistic, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator

from mnist import MNIST
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple mlp on mnist dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size, total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
# Model specification
seq1 = Sequential([Preprocess(functor=lambda x: x / 255.),
Code Example #10
        noise_samples = self.noise_samples(self.batch_size, self.num_iter)

        self.train_set = {
            'data_sample': {
                'data': data_samples,
                'axes': ('batch', 'sample')
            },
            'noise_sample': {
                'data': noise_samples,
                'axes': ('batch', 'sample')
            }
        }
        return self.train_set


parser = NgraphArgparser(description='MLP GAN example')
args = parser.parse_args()

#  model parameters
h_dim = 4
minibatch_discrimination = False
num_iterations = 600
batch_size = 12
num_examples = num_iterations * batch_size

# generator
generator_layers = [
    affine_layer(h_dim, Rectlin(), name='g0'),
    affine_layer(1, Identity(), name='g1')
]
generator = Sequential(generator_layers)
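A hedged sketch of the matching discriminator; affine_layer is the file-local helper used above, while the layer sizes and activations here are assumptions.

discriminator_layers = [
    affine_layer(2 * h_dim, Tanh(), name='d0'),
    affine_layer(1, Logistic(), name='d1')
]
discriminator = Sequential(discriminator_layers)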
Code Example #11
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Layer, Affine, Preprocess, Convolution, Pooling, Sequential
from ngraph.frontends.neon import XavierInit, Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import ArrayIterator

from ngraph.frontends.neon import MNIST
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train LeNet topology on Mnist dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data,
                          args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
Code Example #12
File: overfeat.py, Project: ugiwgh/ngraph
import numpy as np
import ngraph as ng
import ngraph.transformers as ngt
from tqdm import tqdm
from contextlib import closing

from ngraph.frontends.neon import NgraphArgparser, ArrayIterator
from ngraph.frontends.neon import GaussianInit, UniformInit
from ngraph.frontends.neon import Affine, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax

np.seterr(all='raise')

parser = NgraphArgparser(
    description='Train convnet-overfeat model on random dataset')
# Default batch_size for convnet-overfeat is 128.
parser.set_defaults(batch_size=128, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 231
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('batch', 'C', 'height', 'width')
    },
    'label': {
        'data': y_train,
        'axes': ('batch',)
    }
}
Code Example #13
File: lsun_wgan.py, Project: rsumner31/ngraph
        optimizer = RMSProp(learning_rate=5e-5,
                            decay_rate=0.99,
                            epsilon=1e-8,
                            weight_clip_value=weight_clip_value)

    if loss_type == "WGAN-GP":
        optimizer = Adam(learning_rate=1e-4,
                         beta_1=0.5,
                         beta_2=0.9,
                         epsilon=1e-8,
                         weight_clip_value=weight_clip_value)

    return optimizer


parser = NgraphArgparser(description='WGAN on LSUN bedroom dataset')
parser.add_argument('--plot_interval',
                    type=int,
                    default=500,
                    help='display generated samples at this frequency')
parser.add_argument('--lsun_dir',
                    default="/dataset/lsun",
                    help='LSUN data directory')
parser.add_argument(
    '--subset_pct',
    type=float,
    default=50.0,
    help='subset of training dataset to use (percentage), default 50.0')
parser.add_argument('--loss_type',
                    default='WGAN-GP',
                    help='Loss Function',
Code Example #14
"""
from __future__ import division
from __future__ import print_function
import numpy as np
import ngraph as ng
from ngraph.frontends.neon import Affine, Preprocess, Convolution, Pool2D, Sequential
from ngraph.frontends.neon import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator

from cifar10 import CIFAR10
import ngraph.transformers as ngt

parser = NgraphArgparser(description='Train simple CNN on cifar10 dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data,
                          args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)
######################
# Model specification


def cifar_mean_subtract(x):
Code Example #15
            all_results = {
                name: list(np.transpose(res))
                for name, res in zip(metric_names, results)
            }
        else:
            for name, res in zip(metric_names, results):
                all_results[name].extend(list(res))

    reduced_results = {
        k: np.mean(v[:dataset._dataloader.ndata])
        for k, v in all_results.items() if k != 'predictions'
    }
    return all_results, reduced_results


parser = NgraphArgparser(description=__doc__)
parser.add_argument('--mini',
                    default=False,
                    dest='mini',
                    action='store_true',
                    help='If given, builds a mini version of Inceptionv3')
parser.add_argument("--image_dir",
                    default='/dataset/aeon/I1K/i1k-extracted/',
                    help="Path to extracted imagenet data")
parser.add_argument("--train_manifest_file",
                    default='train-index-tabbed.csv',
                    help="Name of tab separated Aeon training manifest file")
parser.add_argument("--valid_manifest_file",
                    default='val-index-tabbed.csv',
                    help="Name of tab separated Aeon validation manifest file")
parser.add_argument("--optimizer_name",
Code Example #16
                name: list(res)
                for name, res in zip(metric_names, results)
            }
        else:
            for name, res in zip(metric_names, results):
                all_results[name].extend(list(res))

    reduced_results = {
        k: np.mean(v[:dataset.ndata])
        for k, v in all_results.items()
    }
    return reduced_results


if __name__ == "__main__":
    parser = NgraphArgparser(
        description='Train deep residual network on cifar10 dataset')
    parser.add_argument(
        '--stage_depth',
        type=int,
        default=2,
        help='depth of each stage (network depth will be 9n+2)')
    parser.add_argument('--use_aeon',
                        action='store_true',
                        help='whether to use aeon dataloader')
    args = parser.parse_args()

    np.random.seed(args.rng_seed)

    # Create the dataloader
    if args.use_aeon:
        from data import make_aeon_loaders
Code Example #17
File: video_c3d.py, Project: rsumner31/ngraph
    train_set = make_train_loader(manifest[1], manifest_root, batch_size,
                                  subset_pct, rng_seed)
    valid_set = make_validation_loader(manifest[0], manifest_root, batch_size,
                                       subset_pct)

    return train_set, valid_set


if __name__ == "__main__":

    # Load training configuration and parse arguments
    train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'train.cfg')
    config_files = [train_config] if os.path.exists(train_config) else []
    parser = NgraphArgparser(__doc__, default_config_files=config_files)

    parser.add_argument('--subset_pct',
                        type=float,
                        default=100,
                        help='subset of training dataset to use (percentage)')
    parser.add_argument('--log_file',
                        type=str,
                        default='training_log.pkl',
                        help='name for the training log file')
    args = parser.parse_args()

    np.random.seed(args.rng_seed)

    # Load data
    train_set, valid_set = get_data(args.manifest, args.manifest_root,