Code Example #1
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for the training, validation, and test images
    train = ImgMaster(repo_dir='dataTmp_optFlow_BW',
                      set_name='train',
                      inner_size=240,
                      subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp_optFlow_BW',
                    set_name='train',
                    inner_size=240,
                    subset_pct=100,
                    do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp_optFlow_BW',
                     set_name='train',
                     inner_size=240,
                     subset_pct=100,
                     do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print "Constructing network..."
    # Create the AlexNet architecture
    model = constuct_network()

    # Optimizer for the model
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model,
                          train,
                          eval_set=test,
                          metric=valmetric,
                          **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    #flag = input("Press Enter if you want to begin training process.")
    print "Training network..."
    model.fit(train,
              optimizer=opt,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print 'Test set metrics:'
    print 'LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
        mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100)
    return
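This example starts three batch providers but never shuts them down; Code Example #4 below pairs each init_batch_provider() call with a matching teardown. A minimal cleanup sketch, assuming the same ImgMaster API, would add the following before returning:

    # Shut down the batch providers once evaluation is finished
    # (mirrors the teardown used in Code Example #4).
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()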
Code Example #2
File: alexnet.py  Project: puneeth579/neon
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# hyperparameters
batch_size = 128

# setup backend
be = gen_backend(backend=args.backend,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 batch_size=batch_size,
                 default_dtype=args.datatype)

try:
    train = ImgMaster(repo_dir=args.data_dir, inner_size=224, set_name='train')
    test = ImgMaster(repo_dir=args.data_dir,
                     inner_size=224,
                     set_name='validation',
                     do_transforms=False)
except (OSError, IOError, ValueError) as err:
    print err
    sys.exit(0)

train.init_batch_provider()
test.init_batch_provider()

init1 = Gaussian(scale=0.01)
init2 = Gaussian(scale=0.03)
relu = Rectlin()
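The excerpt stops at the weight initializers and the ReLU activation. As a hedged sketch only (the layer shapes below are illustrative and not taken from the original alexnet.py), these objects would typically be passed into neon layer constructors such as Conv and Pooling:

# Illustrative continuation: feed the initializers and activation into the first layers.
from neon.layers import Conv, Pooling

layers = [Conv((11, 11, 64), init=init1, activation=relu, strides=4, padding=3),
          Pooling(3, strides=2),
          Conv((5, 5, 192), init=init2, activation=relu, padding=2)]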
Code Example #3
# hyperparameters
args.batch_size = 64
cost_scale = 1.
VGG = 'B'
use_batch_norm = True
biases = None if use_batch_norm else Constant(0)

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# initialize the data provider
img_set_options = dict(repo_dir=args.data_dir,
                       inner_size=224,
                       dtype=args.datatype,
                       subset_pct=100)
train = ImgMaster(set_name='train', **img_set_options)
test = ImgMaster(set_name='validation', do_transforms=False, **img_set_options)
train.init_batch_provider()
test.init_batch_provider()

init1 = GlorotUniform()
relu = Rectlin()
common_params = dict(init=init1,
                     activation=Rectlin(),
                     batch_norm=use_batch_norm,
                     bias=biases)
conv_params = dict(padding=1, **common_params)

# Set up the model layers, using 3x3 conv stacks with different feature map sizes
layers = []
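The excerpt ends with the empty layers list. One possible continuation, shown only as a sketch that assumes neon's Conv and Pooling compound layers are imported, appends 3x3 conv stacks built from the conv_params defined above:

# Sketch only: stack 3x3 convolutions of increasing depth, pooling between stacks.
from neon.layers import Conv, Pooling

for nofm in (64, 128, 256):
    layers.append(Conv((3, 3, nofm), **conv_params))
    layers.append(Conv((3, 3, nofm), **conv_params))
    layers.append(Pooling(2, strides=2))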
Code Example #4
File: cropped_CNN.py  Project: mwoodson1/3-stream-rnn
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for the training, validation, and test images
    print "Setting up data batch loaders..."
    train = ImgMaster(repo_dir='dataTmp',
                      set_name='train',
                      inner_size=120,
                      subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp',
                    set_name='train',
                    inner_size=120,
                    subset_pct=100,
                    do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp',
                     set_name='train',
                     inner_size=120,
                     subset_pct=100,
                     do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print "Constructing network..."
    # Create the AlexNet architecture
    model = constuct_network()

    #model.load_weights(args.model_file)

    # drop the weight LR by 1/250**(1/3) at epochs 22, 44, 65, 90, and 97; drop the bias LR by 1/10 at epoch 130
    weight_sched = Schedule([22, 44, 65, 90, 97], (1 / 250.)**(1 / 3.))
    opt_gdm = GradientDescentMomentum(0.01,
                                      0.9,
                                      wdecay=0.005,
                                      schedule=weight_sched)
    opt_biases = GradientDescentMomentum(0.04,
                                         1.0,
                                         schedule=Schedule([130], .1))
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model,
                          train,
                          eval_set=val,
                          metric=valmetric,
                          **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    #flag = input("Press Enter if you want to begin training process.")
    print "Training network..."
    model.fit(train,
              optimizer=opt,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print 'Test set metrics:'
    print 'LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
        mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100)
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
Code Example #5
File: vgg_bn.py  Project: suixudongi8/neon
args = parser.parse_args()

# hyperparameters
batch_size = 64
cost_scale = 1.

# setup backend
be = gen_backend(backend=args.backend,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 batch_size=batch_size,
                 default_dtype=args.datatype)

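# data providers: 1% of the training set and 50% of the validation set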
train = ImgMaster(repo_dir=args.data_dir,
                  inner_size=224,
                  set_name='train',
                  subset_pct=1,
                  dtype=args.datatype)
test = ImgMaster(repo_dir=args.data_dir,
                 inner_size=224,
                 set_name='validation',
                 subset_pct=50,
                 dtype=args.datatype,
                 do_transforms=False)

train.init_batch_provider()
test.init_batch_provider()

init1 = GlorotUniform()
relu = Rectlin()