# (pdict, train, cost, and args are assumed to be defined earlier in the script)
from neon.callbacks.callbacks import Callbacks
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, PolySchedule
from neon.util.persist import save_obj, load_obj

# build the model and configure callbacks, checkpointing every epoch
model = Model(layers=pdict, dataset=train)
callbacks = Callbacks(model, progress_bar=True, output_file='temp1.h5',
                      serialize=1, history=3, save_path='serialize_test.pkl')

# polynomial learning rate decay, with separate optimizers for weights and biases
lr_sched = PolySchedule(total_epochs=10, power=0.5)
opt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.0002, schedule=lr_sched)
opt_biases = GradientDescentMomentum(0.02, 0.9, schedule=lr_sched)
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

if not args.resume:
    # fit the model for 3 epochs
    model.fit(train, optimizer=opt, num_epochs=3, cost=cost, callbacks=callbacks)

train.reset()

# get one minibatch of images and labels
for im, l in train:
    break
train.exit_batch_provider()
save_obj((im.get(), l.get()), 'im1.pkl')
im_save = im.get().copy()

if args.resume:
    # reload the exact minibatch saved by the first run
    (im2, l2) = load_obj('im1.pkl')
    im.set(im2)
    l.set(l2)

# run fprop on this minibatch and save the results
out_fprop = model.fprop(im)
out_fprop_save = [x.get() for x in out_fprop]
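# A minimal sketch (not part of the original fragment) of how the resumed run
# could verify determinism: the first run persists its fprop outputs, and the
# resumed run compares its fresh outputs against them. The 'fprop1.pkl'
# filename and the tolerance are hypothetical assumptions.
import numpy as np

if not args.resume:
    save_obj(out_fprop_save, 'fprop1.pkl')
else:
    out_fprop_first = load_obj('fprop1.pkl')
    for first, resumed in zip(out_fprop_first, out_fprop_save):
        assert np.allclose(first, resumed, atol=1e-5), \
            'fprop outputs diverged after deserialization'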
# (model, test, and args are assumed to be defined earlier in the script)
from __future__ import print_function
import os

from neon.layers import GeneralizedCost, Multicost
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.transforms import CrossEntropyMulti, TopKMisclassification

valmetric = TopKMisclassification(k=5)

# dummy optimizer for benchmarking
# training implementation coming soon
opt_gdm = GradientDescentMomentum(0.0, 0.0)
opt_biases = GradientDescentMomentum(0.0, 0.0)
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

# set up the cost as cross-entropy; only the main path's cost is weighted,
# the two auxiliary paths are ignored
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyMulti()),
                        GeneralizedCost(costfunc=CrossEntropyMulti())],
                 weights=[1, 0., 0.])

assert os.path.exists(args.model_file), 'script requires the trained weights file'
model.load_params(args.model_file)
model.initialize(test, cost)

print('running speed benchmark...')
model.benchmark(test, cost=cost, optimizer=opt)

print('\nCalculating performance on validation set...')
test.reset()
mets = model.eval(test, metric=valmetric)
print('Validation set metrics:')
print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' %
      (mets[0], (1.0 - mets[1]) * 100, (1.0 - mets[2]) * 100))
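# Note: MultiOptimizer selects an optimizer by layer class name, so the
# 'Bias' entry above applies opt_biases to every Bias layer while all other
# layers fall through to 'default'. A hypothetical extension giving BatchNorm
# layers their own optimizer (the opt_bn name is illustrative, not from the
# original script) would look like:
opt_bn = GradientDescentMomentum(0.0, 0.0)
opt = MultiOptimizer({'default': opt_gdm,
                      'Bias': opt_biases,
                      'BatchNorm': opt_bn})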