Code Example #1
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterator for training images
    train = ImgMaster(repo_dir="spectroDataTmp", set_name="train", inner_size=400, subset_pct=100)
    val = ImgMaster(
        repo_dir="spectroDataTmp", set_name="validation", inner_size=400, subset_pct=100, do_transforms=False
    )
    test = ImgMaster(
        repo_dir="spectroTestDataTmp", set_name="validation", inner_size=400, subset_pct=100, do_transforms=False
    )

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print "Constructing network..."
    model = constuct_network()

    # load pre-trained weights; requires --model_file on the command line
    model.load_weights(args.model_file)

    # Optimizer
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print "Training network..."
    print args.epochs
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print "Validation set metrics:"
    print "LogLoss: %.2f, Accuracy: %.1f %%0 (Top-1), %.1f %% (Top-5)" % (
        mets[0],
        (1.0 - mets[1]) * 100,
        (1.0 - mets[2]) * 100,
    )
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
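None of the listings in this section include their import blocks. Below is a minimal sketch of the imports they appear to depend on, assuming the neon 1.x API that ImgMaster and NeonArgparser come from; the exact module paths may vary between neon releases.

# A sketch of the imports these examples appear to assume (neon 1.x; best guess).
import logging

from neon.util.argparser import NeonArgparser
from neon.data import ImgMaster
from neon.optimizers import (Adadelta, GradientDescentMomentum,
                             MultiOptimizer, Schedule)
from neon.layers import GeneralizedCost
from neon.transforms import CrossEntropyMulti, TopKMisclassification
from neon.callbacks.callbacks import Callback, Callbacks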
Code Example #2
def main():
	# parse the command line arguments
	parser = NeonArgparser(__doc__)

	args = parser.parse_args()

	logger = logging.getLogger()
	logger.setLevel(args.log_thresh)

	#Set up batch iterator for training images
	print "Setting up data batch loaders..."
	train = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100)
	val = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100, do_transforms=False)
	test = ImgMaster(repo_dir='dataTestTmp', set_name='train', inner_size=120, subset_pct=100, do_transforms=False)

	train.init_batch_provider()
	val.init_batch_provider()
	test.init_batch_provider()

	print "Constructing network..."
	#Create AlexNet architecture
	model = constuct_network()

	#model.load_weights(args.model_file)

	# drop the weight LR by (1/250)**(1/3) at epochs 22, 44, 65, 90 and 97 (0-based); drop the bias LR by 1/10 at epoch 130
	weight_sched = Schedule([22, 44, 65, 90, 97], (1/250.)**(1/3.))
	opt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.005, schedule=weight_sched)
	opt_biases = GradientDescentMomentum(0.04, 1.0, schedule=Schedule([130],.1))
	opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

	# configure callbacks
	valmetric = TopKMisclassification(k=5)
	callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)

	cost = GeneralizedCost(costfunc=CrossEntropyMulti())

	#flag = input("Press Enter if you want to begin training process.")
	print "Training network..."
	model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
	mets = model.eval(test, metric=valmetric)

	print 'Validation set metrics:'
	print 'LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
		mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100)
	test.exit_batch_provider()
	val.exit_batch_provider()
	train.exit_batch_provider()
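The learning-rate policy in the optimizer block above is easy to sanity-check in plain Python. A small sketch (no neon needed), assuming neon's Schedule(step_config, change) multiplies the learning rate by change at every epoch listed in step_config:

# Effective weight learning rate after each scheduled drop (illustration only).
base_lr = 0.01
factor = (1 / 250.) ** (1 / 3.)        # roughly 0.1587
lr = base_lr
for epoch in [22, 44, 65, 90, 97]:
    lr *= factor
    print "epoch %3d: lr = %.6g" % (epoch, lr)
# After the first three drops the rate is 0.01 / 250 = 4e-05.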
Code Example #3
File: alexnet.py  Project: sunclx/neon
if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file)

if args.validation_freq:
    class TopKMetrics(Callback):
        def __init__(self, valid_set, epoch_freq=args.validation_freq):
            super(TopKMetrics, self).__init__(epoch_freq=epoch_freq)
            self.valid_set = valid_set

        def on_epoch_end(self, epoch):
            self.valid_set.reset()
            allmetrics = TopKMisclassification(k=5)
            stats = mlp.eval(self.valid_set, metric=allmetrics)
            print ", ".join(allmetrics.metric_names) + ": " + ", ".join(map(str, stats.flatten()))

    callbacks.add_callback(TopKMetrics(test))

if args.save_path:
    checkpoint_schedule = range(1, args.epochs)
    callbacks.add_serialize_callback(checkpoint_schedule, args.save_path, history=2)

mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

test.exit_batch_provider()
train.exit_batch_provider()
Code Example #4
File: alexnet.py  Project: puneeth579/neon
if args.validation_freq:

    class TopKMetrics(Callback):
        def __init__(self, valid_set, epoch_freq=args.validation_freq):
            super(TopKMetrics, self).__init__(epoch_freq=epoch_freq)
            self.valid_set = valid_set

        def on_epoch_end(self, epoch):
            self.valid_set.reset()
            allmetrics = TopKMisclassification(k=5)
            stats = mlp.eval(self.valid_set, metric=allmetrics)
            print ", ".join(allmetrics.metric_names) + ": " + ", ".join(
                map(str, stats.flatten()))

    callbacks.add_callback(TopKMetrics(test))

if args.save_path:
    checkpoint_schedule = range(args.epochs)
    callbacks.add_serialize_callback(checkpoint_schedule,
                                     args.save_path,
                                     history=2)

mlp.fit(train,
        optimizer=opt,
        num_epochs=args.epochs,
        cost=cost,
        callbacks=callbacks)

test.exit_batch_provider()
train.exit_batch_provider()
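Examples #3 and #4 are nearly identical; the visible difference is the checkpoint schedule, and history=2 (as we read the neon 1.x add_serialize_callback API) caps how many serialized checkpoint files are kept on disk. A throwaway sketch of the two schedules for a short run:

# Illustration only: checkpoint schedules for a 5-epoch run.
epochs = 5
print range(1, epochs)   # Example #3: [1, 2, 3, 4] -- no checkpoint after epoch 0
print range(epochs)      # Example #4: [0, 1, 2, 3, 4] -- a checkpoint after every epoch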
Code Example #5
File: cropped_CNN.py  Project: mwoodson1/3-stream-rnn
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    #Set up batch iterator for training images
    print "Setting up data batch loaders..."
    train = ImgMaster(repo_dir='dataTmp',
                      set_name='train',
                      inner_size=120,
                      subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp',
                    set_name='train',
                    inner_size=120,
                    subset_pct=100,
                    do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp',
                     set_name='train',
                     inner_size=120,
                     subset_pct=100,
                     do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print "Constructing network..."
    #Create AlexNet architecture
    model = constuct_network()

    #model.load_weights(args.model_file)

    # drop the weight LR by (1/250)**(1/3) at epochs 22, 44, 65, 90 and 97 (0-based); drop the bias LR by 1/10 at epoch 130
    weight_sched = Schedule([22, 44, 65, 90, 97], (1 / 250.)**(1 / 3.))
    opt_gdm = GradientDescentMomentum(0.01,
                                      0.9,
                                      wdecay=0.005,
                                      schedule=weight_sched)
    opt_biases = GradientDescentMomentum(0.04,
                                         1.0,
                                         schedule=Schedule([130], .1))
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model,
                          train,
                          eval_set=val,
                          metric=valmetric,
                          **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    #flag = input("Press Enter if you want to begin training process.")
    print "Training network..."
    model.fit(train,
              optimizer=opt,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print 'Validation set metrics:'
    print 'LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
        mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100)
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
Code Example #6
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    #Set up batch iterator for training images
    train = ImgMaster(repo_dir='spectroDataTmp',
                      set_name='train',
                      inner_size=400,
                      subset_pct=100)
    val = ImgMaster(repo_dir='spectroDataTmp',
                    set_name='validation',
                    inner_size=400,
                    subset_pct=100,
                    do_transforms=False)
    test = ImgMaster(repo_dir='spectroTestDataTmp',
                     set_name='validation',
                     inner_size=400,
                     subset_pct=100,
                     do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print "Constructing network..."
    model = constuct_network()

    # load pre-trained weights; requires --model_file on the command line
    model.load_weights(args.model_file)

    #Optimizer
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model,
                          train,
                          eval_set=val,
                          metric=valmetric,
                          **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    #flag = input("Press Enter if you want to begin training process.")
    print "Training network..."
    print args.epochs
    model.fit(train,
              optimizer=opt,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print 'Validation set metrics:'
    print 'LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (
        mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100)
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
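For reference, the reporting lines in these examples unpack the three values model.eval returns when the metric is TopKMisclassification(k=5): log-loss, top-1 misclassification and top-k misclassification. A short sketch of that mapping, reusing model, test and valmetric from Example #6 above (the metric_names strings reflect our reading of the neon 1.x API and may differ in other releases):

# Sketch: interpreting the eval output of TopKMisclassification(k=5).
mets = model.eval(test, metric=valmetric)
print valmetric.metric_names            # e.g. ['LogLoss', 'Top1Misclass', 'TopKMisclass']
logloss, top1_err, top5_err = mets[0], mets[1], mets[2]
print "LogLoss: %.2f" % logloss
print "Top-1 accuracy: %.1f %%" % ((1.0 - top1_err) * 100)
print "Top-5 accuracy: %.1f %%" % ((1.0 - top5_err) * 100)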