Example #1
        train = ArrayIterator(self.train_x,
                              self.train_y,
                              lshape=self.shape,
                              make_onehot=False,
                              name='train')
        valid = ArrayIterator(self.valid_x,
                              self.valid_y,
                              lshape=self.shape,
                              make_onehot=False,
                              name='valid')
        self._data_dict = {'train': train, 'valid': valid}
        return self._data_dict


def r_score(y_true, y_pred):
    # signed square root of R^2: keeps the sign of the fit while compressing its scale
    r2 = r2_score(y_true, y_pred)
    return np.sign(r2) * math.sqrt(math.fabs(r2))


parser = NeonArgparser(__doc__)
parser.add_argument('-quick',
                    '--quick_mode',
                    action="store_true",
                    help="use a small subset of the data")
parser.add_argument('-ts',
                    '--time_steps',
                    type=int,
                    default=7,
                    help='number of time steps')
args = parser.parse_args()
dataset = Fin(nlags=args.time_steps,
              path=args.data_dir,
              quick=args.quick_mode)

train = dataset.train_iter
valid = dataset.valid_iter
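
These iterators plug straight into neon's Model.fit. A minimal sketch of how they are typically consumed, assuming a small recurrent regression network (the layer sizes and optimizer below are illustrative, not from the original example):

from neon.callbacks.callbacks import Callbacks
from neon.initializers import GlorotUniform
from neon.layers import Affine, GeneralizedCost, LSTM
from neon.models import Model
from neon.optimizers import Adagrad
from neon.transforms import Identity, Logistic, MeanSquared, Tanh

init = GlorotUniform()
layers = [LSTM(64, init, activation=Tanh(), gate_activation=Logistic()),
          Affine(1, init, activation=Identity())]  # single regression output

model = Model(layers=layers)
cost = GeneralizedCost(costfunc=MeanSquared())
callbacks = Callbacks(model, eval_set=valid, **args.callback_args)
model.fit(train, optimizer=Adagrad(), cost=cost,
          num_epochs=args.epochs, callbacks=callbacks)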
Example #2
"""

import util
from objectlocalization import PASCALVOC
from neon.backends import gen_backend
from neon.util.persist import get_data_cache_dir, save_obj
from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.util.compat import range
from neon import logger as neon_logger
from voc_eval import voc_eval
import numpy as np
import faster_rcnn
from tqdm import tqdm

# parse the command line arguments
parser = NeonArgparser(__doc__, default_overrides={'batch_size': 1})
parser.add_argument('--normalize',
                    action='store_true',
                    help='Normalize the final bounding box regression layers.')
parser.add_argument('--output',
                    default=None,
                    help='File to save inference results (optional)')
parser.add_argument('--width',
                    type=int,
                    default=1000,
                    help='Width of input image')
parser.add_argument('--height',
                    type=int,
                    default=1000,
                    help='Height of input image')
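
The listing stops before the arguments are consumed; the usual continuation in neon scripts (a sketch, not necessarily this file's exact code) parses them and builds the backend from the recognized options:

args = parser.parse_args()

# hand only the options gen_backend understands to the backend factory
be = gen_backend(**extract_valid_args(args, gen_backend))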
Example #3
    # evaluation
    error_rate = mlp_model.eval(valid_set)
    logger.info('Misclassification error on validation set = %0.1f%%',
                error_rate * 100)

    results = mlp_model.get_outputs(valid_set)

    return results


# -------------------------------------------------------------------------------------#

if __name__ == "__main__":
    # parse the command line arguments
    parser = NeonArgparser()
    parser.add_argument('--data_set_file',
                        default='data/data_set.pkl',
                        type=validate_existing_filepath,
                        help='train and validation sets path')
    parser.add_argument('--model_prm',
                        default='data/mcs_model.prm',
                        type=validate_parent_exists,
                        help='trained model full path')

    args = parser.parse_args()

    # generate the backend; optionally switch to backend='mkl'
    be = gen_backend(backend='cpu', batch_size=10)

    # read training and validation data file
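
The snippet is cut off here. A plausible continuation, assuming args.data_set_file is a pickle holding the train and validation splits (the tuple layout below is a guess, not from the original):

    import pickle

    with open(args.data_set_file, 'rb') as fp:
        train_set, valid_set = pickle.load(fp)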
Example #4
def main():
    # larger batch sizes may not fit on GPU
    parser = NeonArgparser(__doc__, default_overrides={'batch_size': 4})
    parser.add_argument("--bench", action="store_true", help="run benchmark instead of training")
    parser.add_argument("--num_classes", type=int, default=12, help="number of classes in the annotation")
    parser.add_argument("--height", type=int, default=256, help="image height")
    parser.add_argument("--width", type=int, default=512, help="image width")

    args = parser.parse_args(gen_be=False)

    # check that image dimensions are powers of 2
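    # x & (x - 1) clears the lowest set bit, so it is zero only when at most one bit is set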
    if((args.height & (args.height - 1)) != 0):
        raise TypeError("Height must be a power of 2.")
    if((args.width & (args.width - 1)) != 0):
        raise TypeError("Width must be a power of 2.")

    (c, h, w) = (args.num_classes, args.height, args.width)

    # need to use the backend with the new upsampling layer implementation
    be = NervanaGPU_Upsample(rng_seed=args.rng_seed,
                             device_id=args.device_id)
    # set batch size
    be.bsz = args.batch_size

    # couple backend to global neon object
    NervanaObject.be = be

    shape = dict(channel_count=3, height=h, width=w, subtract_mean=False)
    train_params = ImageParams(center=True, flip=False,
                               scale_min=min(h, w), scale_max=min(h, w),
                               aspect_ratio=0, **shape)
    test_params = ImageParams(center=True, flip=False,
                              scale_min=min(h, w), scale_max=min(h, w),
                              aspect_ratio=0, **shape)
    common = dict(target_size=h*w, target_conversion='read_contents',
                  onehot=False, target_dtype=np.uint8, nclasses=args.num_classes)

    train_set = PixelWiseImageLoader(set_name='train', repo_dir=args.data_dir,
                                      media_params=train_params,
                                      shuffle=False, subset_percent=100,
                                      index_file=os.path.join(args.data_dir, 'train_images.csv'),
                                      **common)
    val_set = PixelWiseImageLoader(set_name='val', repo_dir=args.data_dir,
                                   media_params=test_params,
                                   index_file=os.path.join(args.data_dir, 'val_images.csv'),
                                   **common)

    # initialize model object
    layers = gen_model(c, h, w)
    segnet_model = Model(layers=layers)

    # configure callbacks
    callbacks = Callbacks(segnet_model, eval_set=val_set, **args.callback_args)

    opt_gdm = GradientDescentMomentum(1.0e-6, 0.9, wdecay=0.0005, schedule=Schedule())
    opt_biases = GradientDescentMomentum(2.0e-6, 0.9, schedule=Schedule())
    opt_bn = GradientDescentMomentum(1.0e-6, 0.9, schedule=Schedule())
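    # per-layer optimizer mapping: bias layers get twice the default learning rate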
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases, 'BatchNorm': opt_bn})

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    if args.bench:
        segnet_model.initialize(train_set, cost=cost)
        segnet_model.benchmark(train_set, cost=cost, optimizer=opt)
        sys.exit(0)
    else:
        segnet_model.fit(train_set, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    # get the trained segnet model outputs for the validation set
    outs_val = segnet_model.get_outputs(val_set)

    with open('outputs.pkl', 'wb') as fid:
        pickle.dump(outs_val, fid, -1)
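
Reading the pickled outputs back later mirrors the write (a usage sketch, not part of the original):

import pickle

with open('outputs.pkl', 'rb') as fid:
    outs_val = pickle.load(fid)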
Example #5
import os
from neon import logger as neon_logger
from neon.optimizers import GradientDescentMomentum, Schedule, MultiOptimizer
from neon.transforms import Accuracy
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

from data import make_train_loader, make_test_loader
from network import create_network

# parse the command line arguments
train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'train.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--subset_pct',
                    type=float,
                    default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()

random_seed = 0 if args.rng_seed is None else args.rng_seed
model, cost = create_network()

# setup data provider
assert 'train' in args.manifest, "Missing train manifest"
assert 'test' in args.manifest, "Missing test manifest"

train = make_train_loader(args.manifest['train'], args.manifest_root, model.be,
                          args.subset_pct, random_seed)
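
The matching test loader would come from the other imported factory; its exact signature lives in this example's data.py, so the call below is an assumption modeled on make_train_loader:

test = make_test_loader(args.manifest['test'], args.manifest_root, model.be,
                        args.subset_pct)  # signature assumed to mirror the train loader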
Example #6
        train_X[i, :] = wave_data

        train_y = ReadCSV("data/train.csv")
        train_y = np.array(train_y[0:1000])

        # test_X = np.zeros((test_dim, nframes), dtype=np.float32)
        #    for i in range(0, test_dim):
        #        filename = path+"test/test%d.aiff" % (i+1)
        #        wave_data_gram = ReadAIFF(filename)
        #        test_X[i, :] = wave_data_gram.reshape(nframes)

    train_X, train_y = shuffle(train_X, train_y)
    return train_X[0:700], train_y[0:700], train_X[700:1000], train_y[700:1000]


parser = NeonArgparser(__doc__)  # the standard neon args select the backend (e.g. gpu)
args = parser.parse_args()
args.epochs = 50  # train for a fixed 50 epochs regardless of the command line

train_set_x, train_set_y, valid_set_x, valid_set_y = load_data("data/")
train_set = ArrayIterator(train_set_x, train_set_y, nclass=2)
valid_set = ArrayIterator(valid_set_x, valid_set_y, nclass=2)

init_uni = Gaussian(loc=0.0, scale=0.01)

# setup model layers
layers = [DeepBiRNN(4000, init=init_uni, activation=Rectlin()),
          DeepBiRNN(1000, init=init_uni, activation=Rectlin()),
          DeepBiRNN(200, init=init_uni, activation=Rectlin()),
          Affine(nout=2, init=init_uni, activation=Logistic(shortcut=True))]
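
The listing ends with the layer stack; a minimal sketch of the usual continuation (cost, optimizer, and fit; the optimizer choice and learning rate are illustrative, not from the original):

from neon.callbacks.callbacks import Callbacks
from neon.layers import GeneralizedCost
from neon.models import Model
from neon.optimizers import Adagrad
from neon.transforms import CrossEntropyBinary

model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyBinary())  # pairs with Logistic(shortcut=True)
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)
model.fit(train_set, optimizer=Adagrad(learning_rate=0.01), cost=cost,
          num_epochs=args.epochs, callbacks=callbacks)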
Example #7
    python examples/imagenet_allcnn.py -w </path/to/ImageNet/macrobatches>

"""

from neon.util.argparser import NeonArgparser
from neon.initializers import Kaiming
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.layers import Conv, Dropout, Activation, Pooling, GeneralizedCost, DataTransform
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, Normalizer
from neon.models import Model
from neon.callbacks.callbacks import Callbacks
from neon.data import ImageLoader

# parse the command line arguments
parser = NeonArgparser(__doc__, default_overrides=dict(batch_size=64))
parser.add_argument('--deconv',
                    action='store_true',
                    help='save visualization data from deconvolution')
parser.add_argument('--subset_pct',
                    type=float,
                    default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       scale_range=256,
                       subset_pct=args.subset_pct)
train = ImageLoader(set_name='train', **img_set_options)
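
The matching validation loader typically reuses the same options with random transforms disabled (a sketch; whether scale_range needs adjusting for center crops is left to the original script):

valid = ImageLoader(set_name='validation', do_transforms=False, **img_set_options)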
Example #8
import os
from neon.util.argparser import NeonArgparser
from neon.optimizers import GradientDescentMomentum, Schedule
from neon.transforms import TopKMisclassification
from neon.callbacks.callbacks import Callbacks

from data import make_alexnet_train_loader, make_validation_loader
from network_allcnn import create_network

# parse the command line arguments (generates the backend)
train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'train.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

parser = NeonArgparser(__doc__,
                       default_config_files=config_files,
                       default_overrides=dict(batch_size=64))
parser.add_argument('--deconv',
                    action='store_true',
                    help='save visualization data from deconvolution')
parser.add_argument('--subset_pct',
                    type=float,
                    default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()

model, cost = create_network()
rseed = 0 if args.rng_seed is None else args.rng_seed

# setup data provider
assert 'train' in args.manifest, "Missing train manifest"
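
A likely continuation builds the loaders from the imported helpers; their signatures live in this example's data.py, so the calls below are assumptions modeled on the sibling examples:

assert 'val' in args.manifest, "Missing validation manifest"

train = make_alexnet_train_loader(args.manifest['train'], args.manifest_root,
                                  model.be, args.subset_pct, rseed)
valid = make_validation_loader(args.manifest['val'], args.manifest_root,
                               model.be, args.subset_pct)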
Example #9
import os

from datetime import datetime
from neon.callbacks.callbacks import Callbacks, GANCostCallback
from neon.callbacks.plotting_callbacks import GANPlotCallback
from neon.optimizers import RMSProp
from neon.util.argparser import NeonArgparser
from neon.util.persist import ensure_dirs_exist
from network_gan import create_model
from lsun_data import make_loader

# parse the command line arguments
train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'train.cfg')
config_files = [train_config] if os.path.exists(train_config) else []
parser = NeonArgparser(__doc__,
                       default_config_files=config_files,
                       default_overrides={
                           'rng_seed': 0,
                           'batch_size': 64
                       })
parser.add_argument('-D',
                    '--dmodel',
                    type=str,
                    default='dc',
                    help='discriminator model type: dc or mlp, default dc')
parser.add_argument('-G',
                    '--gmodel',
                    type=str,
                    default='dc',
                    help='generator model type: dc or mlp, default dc')
parser.add_argument(
    '--subset_pct',
    type=float,
Example #10
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # set up batch iterators for the training, validation, and test images
    train = ImgMaster(repo_dir='spectroDataTmp',
                      set_name='train',
                      inner_size=400,
                      subset_pct=100)
    val = ImgMaster(repo_dir='spectroDataTmp',
                    set_name='validation',
                    inner_size=400,
                    subset_pct=100,
                    do_transforms=False)
    test = ImgMaster(repo_dir='spectroTestDataTmp',
                     set_name='validation',
                     inner_size=400,
                     subset_pct=100,
                     do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()  # val is used as eval_set below, so it needs a provider too
    test.init_batch_provider()

    print "Constructing network..."
    model = constuct_network()

    model.load_weights(args.model_file)

    # optimizer
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model,
                          train,
                          eval_set=val,
                          metric=valmetric,
                          **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    print("Training network for %d epochs..." % args.epochs)
    model.fit(train,
              optimizer=opt,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
        mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))
    test.exit_batch_provider()
    train.exit_batch_provider()
Example #11
from neon import logger as neon_logger
from neon.data import Shakespeare
from neon.initializers import Uniform
from neon.layers import GeneralizedCost, LSTM, Affine
from neon.models import Model
from neon.optimizers import RMSProp
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
default_overrides = dict(save_path='rnn_text_gen.pickle',
                         serialize=1,
                         batch_size=64)

parser = NeonArgparser(__doc__, default_overrides=default_overrides)
args = parser.parse_args()

# hyperparameters
time_steps = 64
hidden_size = 512
gradient_clip_value = 5

# download shakespeare text
dataset = Shakespeare(time_steps, path=args.data_dir)
train_set = dataset.train_iter
valid_set = dataset.valid_iter

# weight initialization
init = Uniform(low=-0.08, high=0.08)
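
From here the pieces imported above assemble in the usual way; a sketch modeled on neon's text-generation example (the vocab attribute on the dataset is assumed):

layers = [LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh()),
          Affine(len(train_set.vocab), init, bias=init, activation=Softmax())]

model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = RMSProp(gradient_clip_value=gradient_clip_value)
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)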
Example #12
                    file_name = os.path.join(image_dir, 'image_{}.jpg'.format(n))
                    img.save(file_name)
                    n += 1

    return (all_boxes, all_gt_boxes)


if __name__ == '__main__':
    """

    Simple example of using the dataloader with pre-generated augmentation data

    """
    arg_defaults = {'batch_size': 0}

    parser = NeonArgparser(__doc__, default_overrides=arg_defaults)
    parser.add_argument('--ssd_config', action='append', required=True, help='ssd json file path')
    parser.add_argument('--height', type=int, help='image height')
    parser.add_argument('--width', type=int, help='image width')
    parser.add_argument('--num_images', type=int, default=0, help='number of images to plot')
    parser.add_argument('--image_dir', type=str, help='folder to save sampled images')
    parser.add_argument('--score_threshold', type=float, help='threshold for predicted scores.')
    parser.add_argument('--output', type=str, help='file to save detected boxes.')
    args = parser.parse_args(gen_be=False)
    if args.model_file is None:
        parser.print_usage()
        exit('You need to specify a model file to evaluate.')

    if args.ssd_config:
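        # each --ssd_config entry arrives as 'key:path'; fold the list into a {key: path} dict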
        args.ssd_config = {k: v for k, v in [ss.split(':') for ss in args.ssd_config]}