Example #1
# (The snippet begins mid-import; the names before Softmax are reconstructed
# from what this example appears to use, so treat them as assumptions.)
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, Recurrent,
                                   BiRNN, Affine, Softmax, Tanh, LookupTable)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import NgraphArgparser
from ngraph.frontends.neon import ArrayIterator
import ngraph.transformers as ngt

from imdb import IMDB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.set_defaults(gen_be=False)
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 128
time_steps = 128
hidden_size = 10
gradient_clip_value = 15
embed_size = 128
vocab_size = 20000
pad_idx = 0

# download IMDB
imdb_dataset = IMDB(path=args.data_dir,
                    sentence_length=time_steps,
                    pad_idx=pad_idx)
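
# The snippet imports ArrayIterator but stops before using it. A plausible
# continuation (load_data() returning 'train'/'valid' splits is an assumption):
imdb_data = imdb_dataset.load_data()
train_set = ArrayIterator(imdb_data['train'], batch_size=args.batch_size,
                          total_iterations=args.num_iterations)
valid_set = ArrayIterator(imdb_data['valid'], batch_size=args.batch_size)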
Example #2
import os

from ngraph.frontends.neon import NgraphArgparser
# check_size is assumed to come from nlp-architect's IO utilities
from nlp_architect.utils.io import check_size

# parse the command line arguments
parser = NgraphArgparser(__doc__)

parser.add_argument('--data_path', help='enter path for training data',
                    type=str)

parser.add_argument('--gpu_id', default="0", help='enter gpu id',
                    type=str, action=check_size(0, 10))

parser.add_argument('--max_para_req', default=100,
                    help='enter the max length of paragraph',
                    type=int, action=check_size(30, 300))

parser.add_argument('--batch_size_squad', default=16,
                    help='enter the batch size',
                    type=int, action=check_size(1, 256))

parser.set_defaults()

args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id

hidden_size = 150
gradient_clip_value = 15
embed_size = 300

params_dict = {}
params_dict['batch_size'] = args.batch_size_squad
params_dict['embed_size'] = 300
params_dict['pad_idx'] = 0
params_dict['hs'] = hidden_size
params_dict['glove_dim'] = 300
params_dict['iter_interval'] = 8000
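
# check_size is used above as an argparse action factory but never shown here.
# A minimal sketch of such a factory, assuming it enforces inclusive bounds:
import argparse

def check_size(min_size, max_size):
    """Return an argparse action that rejects values outside [min_size, max_size]."""
    class CheckSize(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            if not min_size <= values <= max_size:
                parser.error("{} must be in [{}, {}]".format(
                    option_string, min_size, max_size))
            setattr(namespace, self.dest, values)
    return CheckSize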
Example #3
import ngraph as ng
from ngraph.frontends.neon import (Layer, Sequential, Preprocess, LSTM,
                                   Affine, Softmax, Tanh, Logistic)
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.add_argument('--layer_type', default='lstm', choices=['lstm'],
                    help='type of recurrent layer to use (lstm)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 64
time_steps = 50
hidden_size = 128
gradient_clip_value = 5

# download penn treebank
tree_bank_data = PTB(path=args.data_dir)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'], batch_size=args.batch_size,
                                    time_steps=time_steps, total_iterations=args.num_iterations)

valid_set = SequentialArrayIterator(ptb_data['valid'], batch_size=args.batch_size,
                                    time_steps=time_steps)
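
# A plausible next step given the imports above: the single-layer LSTM language
# model these hyperparameters suggest (initializer bounds are assumptions).
init = UniformInit(low=-0.08, high=0.08)
model = Sequential([
    Preprocess(functor=lambda x: ng.one_hot(x, axis=ax.Y)),
    LSTM(nout=hidden_size, init=init, activation=Tanh(),
         gate_activation=Logistic(), return_sequence=True),
    Affine(weight_init=init, bias_init=init, activation=Softmax(), axes=(ax.Y,))
])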
Example #4
import numpy as np
import ngraph.transformers as ngt
from tqdm import tqdm
from contextlib import closing

from ngraph.frontends.neon import NgraphArgparser, ArrayIterator
from ngraph.frontends.neon import GaussianInit, UniformInit
from ngraph.frontends.neon import Affine, Convolution, Pooling, Sequential
from ngraph.frontends.neon import Rectlin, Softmax, GradientDescentMomentum
from ngraph.frontends.neon import ax

np.seterr(all='raise')

parser = NgraphArgparser(
    description='Train convnet-vgg_a model on random dataset')
# Default batch_size for convnet-vgg_a is 64
parser.set_defaults(batch_size=64, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 224
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('N', 'C', 'H', 'W')
    },
    'label': {
        'data': y_train,
        'axes': ('N',)
    }
}
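
# With the dataset dict closed, the natural next step (matching the
# ArrayIterator import above) is to wrap it in an iterator; a sketch:
train_set = ArrayIterator(train_data, batch_size=args.batch_size,
                          total_iterations=args.num_iterations)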
Example #5
    return placeholders
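
# Only the tail of make_placeholders survives above. A hypothetical sketch of
# such a helper; the loader field names below are illustrative assumptions:
import ngraph as ng

def make_placeholders_sketch(batch_size, loader):
    batch_axis = ng.make_axis(length=batch_size, name='N')
    placeholders = {}
    # one placeholder per input feature, keyed by feature name
    for name, length in loader.parameters['input_dimensions'].items():
        feature_axis = ng.make_axis(length=length, name=name)
        placeholders[name] = ng.placeholder(axes=[feature_axis, batch_axis])
    return placeholders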


parser = NgraphArgparser(description=__doc__)
parser.add_argument("--learning_rate",
                    type=float,
                    default=0.01,
                    help="Learning rate")
parser.add_argument("--epochs", type=int, default=41, help="Number of epochs")
parser.add_argument(
    "--deep_parameters",
    default='100,50',
    type=str,
    help="Comma separated list of hidden neurons on the deep section of the model")
parser.set_defaults(batch_size=40)

args = parser.parse_args()

fc_layers_deep = [int(s) for s in args.deep_parameters.split(',')]

cs_loader = data.CensusDataset(args.batch_size)

inputs = make_placeholders(args.batch_size, cs_loader)

model = WideDeepClassifier(cs_loader.parameters['dimensions_embeddings'],
                           cs_loader.parameters['tokens_in_embeddings'],
                           fc_layers_deep,
                           deep_activation_fn=Rectlin())

wide_deep = model(args.batch_size, inputs)
Example #6
# (Snippet begins mid-script; parser, np (numpy), ax and make_aeon_loaders are
# defined earlier. The '--mini' flag name is inferred from dest='mini'.)
parser.add_argument('--mini',
                    default=False,
                    dest='mini',
                    action='store_true',
                    help='If given, builds a mini version of Inceptionv3')
parser.add_argument("--image_dir",
                    default='/dataset/aeon/I1K/i1k-extracted/',
                    help="Path to extracted imagenet data")
parser.add_argument("--train_manifest_file",
                    default='train-index-tabbed.csv',
                    help="Name of tab separated Aeon training manifest file")
parser.add_argument("--valid_manifest_file",
                    default='val-index-tabbed.csv',
                    help="Name of tab separated Aeon validation manifest file")
parser.add_argument("--optimizer_name",
                    default='rmsprop',
                    help="Name of optimizer (rmsprop or sgd)")
parser.set_defaults(batch_size=4, num_iterations=10000000, iter_interval=2000)
args = parser.parse_args()

# Set the random seed
np.random.seed(1)
# Number of outputs of last layer.
ax.Y.length = 1000
ax.N.length = args.batch_size

# Build AEON data loader objects
train_set, valid_set = make_aeon_loaders(
    train_manifest=args.train_manifest_file,
    valid_manifest=args.valid_manifest_file,
    batch_size=args.batch_size,
    train_iterations=args.num_iterations,
    dataset='i1k')
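
# A plausible way the --optimizer_name flag is consumed later in the script
# (a sketch; the hyperparameter values are placeholders, not from the original):
from ngraph.frontends.neon import GradientDescentMomentum, RMSProp

if args.optimizer_name == 'sgd':
    optimizer = GradientDescentMomentum(0.01, momentum_coef=0.9)
elif args.optimizer_name == 'rmsprop':
    optimizer = RMSProp()
else:
    raise NotImplementedError(args.optimizer_name)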
Example #7
# (Snippet begins mid-script; parser is an NgraphArgparser created earlier,
# and the validate helpers are assumed to come from nlp_architect.utils.io.)
parser.add_argument(
    '--restore',
    default=False,
    action='store_true',
    help='Restore weights if found.')
parser.add_argument(
    '--interactive',
    default=False,
    action='store_true',
    help='enable interactive mode at the end of training.')
parser.add_argument(
    '--test',
    default=False,
    action='store_true',
    help='evaluate on the test set at the end of training.')

parser.set_defaults(batch_size=32, epochs=200)
args = parser.parse_args()

validate((args.emb_size, int, 1, 10000),
         (args.eps, float, 1e-15, 1e-2),
         (args.lr, float, 1e-8, 10),
         (args.grad_clip_norm, float, 1e-3, 1e5))

# Validate inputs
validate_parent_exists(args.log_file)
log_file = args.log_file
validate_parent_exists(args.weights_save_path)
weights_save_path = args.weights_save_path
validate_parent_exists(args.data_dir)
data_dir = args.data_dir
assert weights_save_path.endswith('.npz')
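
# validate is called above with (value, type, lower, upper) tuples. A minimal
# sketch of a checker consistent with that calling convention:
def validate_sketch(*specs):
    for value, expected_type, lower, upper in specs:
        if not isinstance(value, expected_type):
            raise TypeError("expected {}, got {}".format(expected_type, type(value)))
        if not lower <= value <= upper:
            raise ValueError("{} is outside [{}, {}]".format(value, lower, upper))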
Example #8
# (Snippet begins mid-script; the imports below are reconstructed for the
# names used in this example.)
from ngraph.frontends.neon import NgraphArgparser, UniformInit, Shakespeare

parser = NgraphArgparser(__doc__)
parser.add_argument(
    '--use_embedding',
    default=False,
    dest='use_embedding',
    action='store_true',
    help='If given, embedding layer is used as the first layer')
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--recurrent_units',
                    type=int,
                    help="Number of recurrent units in the network",
                    default=256)
parser.set_defaults(num_iterations=20000)
args = parser.parse_args()

use_embedding = args.use_embedding
recurrent_units = args.recurrent_units
batch_size = args.batch_size
seq_len = args.seq_len
num_iterations = args.num_iterations

# Ratio of the text to use for training
train_ratio = 0.95
# Define the weight initialization for the network
init_uni = UniformInit(-0.1, 0.1)

# Create the object that includes the sample text
shakes = Shakespeare(train_split=train_ratio)
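
# A plausible continuation, mirroring the PTB examples on this page; the
# shakes.train attribute name is an assumption:
from ngraph.frontends.neon import SequentialArrayIterator

train_set = SequentialArrayIterator(shakes.train, batch_size=batch_size,
                                    time_steps=seq_len,
                                    total_iterations=num_iterations)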
Example #9
import numpy as np
from contextlib import closing
import ngraph as ng
from ngraph.frontends.neon import Layer, Preprocess, Recurrent, Affine, Softmax, Tanh
from ngraph.frontends.neon import UniformInit, RMSProp
from ngraph.frontends.neon import ax, loop_train
from ngraph.frontends.neon import NgraphArgparser, make_bound_computation, make_default_callbacks
from ngraph.frontends.neon import SequentialArrayIterator
import ngraph.transformers as ngt

from ngraph.frontends.neon import PTB

# parse the command line arguments
parser = NgraphArgparser(__doc__)
parser.set_defaults(batch_size=128, num_iterations=2000)
args = parser.parse_args()

# model parameters
time_steps = 5
hidden_size = 256
gradient_clip_value = 5

# download penn treebank
# set shift_target to False, since the model will predict the same sequence
tree_bank_data = PTB(path=args.data_dir, shift_target=False)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
                                    total_iterations=args.num_iterations)
Example #10
# (Snippet begins mid-script; parser is an NgraphArgparser created earlier, and
# check_size / validate helpers are assumed to come from nlp_architect.utils.io.)
parser.add_argument('--use_oov',
                    default=False,
                    action='store_true',
                    help='use OOV test set')
parser.add_argument(
    '--eps',
    type=float,
    default=1e-8,
    help='epsilon used to avoid divide by zero in softmax renormalization.',
    action=check_size(1e-100, 1e-2))
parser.add_argument('--model_file',
                    default='memn2n_weights.npz',
                    help='File to load model weights from.',
                    type=str)

parser.set_defaults(batch_size=32, epochs=200)
args = parser.parse_args()

validate((args.emb_size, int, 1, 10000), (args.eps, float, 1e-15, 1e-2))

# Sanitize inputs
validate_existing_filepath(args.model_file)
model_file = args.model_file
assert model_file.endswith('.npz')
validate_parent_exists(args.data_dir)
data_dir = args.data_dir

babi = BABI_Dialog(path=data_dir,
                   task=args.task,
                   oov=args.use_oov,
                   use_match_type=args.use_match_type)