# Note: module-level names such as `bnet` (the network backend), `mnt` (the
# monitoring module), `np` (numpy), `os`, `sys`, `accuracy_score`, `_LOGGER`,
# and the helpers `training_data`, `test_data`, `_model`, `RandCropMonitor`
# and `round_to_mbsize` are defined or imported elsewhere in the original
# module and are not part of this excerpt.
def score(result_folder, epoch=None, use_cpu=False):
    """Test a network on the dataset."""
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    _LOGGER.info("Loading data...")
    tr_data, _ = training_data()
    te_data, te_labels = test_data()
    from data import _MEAN
    _LOGGER.info("Loading network...")
    # Load the model.
    model, _, _, _ = _model(result_folder,
                            tr_data.shape[0],
                            epoch=epoch,
                            no_solver=True)
    _LOGGER.info("Predicting...")
    results = model.predict(te_data,
                            test_callbacks=[RandCropMonitor('data', _MEAN),
                                            mnt.ProgressIndicator()],
                            out_blob_names=['score'])
    # Report accuracy over the argmax of the predicted 'score' blob.
    _LOGGER.info("Accuracy: %f.",
                 accuracy_score(te_labels,
                                np.argmax(np.array(results), axis=1)))
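
# `RandCropMonitor` used above is a project-specific callback defined
# elsewhere in this repository. The sketch below is NOT that class; it is
# only a minimal, self-contained illustration of the kind of preprocessing
# it presumably applies (mean subtraction plus a random spatial crop on an
# NCHW batch). The function name, padding, and crop size are assumptions
# made purely for illustration.
import numpy as np


def _rand_crop_with_mean_sketch(batch, mean, pad=4):
    """Subtract `mean` and take a random crop of the original size.

    `batch` is expected to be NCHW; `mean` must broadcast against one sample.
    """
    batch = batch - mean
    num, chans, height, width = batch.shape
    # Zero-pad each side, then crop back to the original spatial size at a
    # random offset per sample.
    padded = np.zeros((num, chans, height + 2 * pad, width + 2 * pad),
                      dtype=batch.dtype)
    padded[:, :, pad:pad + height, pad:pad + width] = batch
    out = np.empty_like(batch)
    for idx in range(num):
        off_y = np.random.randint(0, 2 * pad + 1)
        off_x = np.random.randint(0, 2 * pad + 1)
        out[idx] = padded[idx, :, off_y:off_y + height, off_x:off_x + width]
    return out
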

def test_image(result_folder, epoch=None, image_idx=0, use_cpu=False):
    """Test a network on one test image."""
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    _LOGGER.info("Loading data...")
    tr_data, _ = training_data()
    te_data, _ = test_data()
    from data import _MEAN
    _LOGGER.info("Loading network...")
    # Load the model for training.
    model, _, _, _ = _model(result_folder, tr_data.shape[0], epoch=epoch)
    _LOGGER.info("Predicting...")
    results = model.predict(te_data[:image_idx + 1],
                            test_callbacks=[RandCropMonitor('data', _MEAN),
                                            mnt.ProgressIndicator()],
                            out_blob_names=['score'])
    _LOGGER.info("Prediction for image %d: %s.",
                 image_idx, str(results[image_idx]))

def test_image(result_folder, epoch=None, image_idx=0, use_cpu=False):
    """Test a network on one test image."""
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    _LOGGER.info("Loading data...")
    tr_data, _ = training_data()
    te_data, _ = test_data()
    _LOGGER.info("Loading network...")
    # Load the model for training.
    model, _, _, _ = _model(result_folder, tr_data.shape[0], epoch=epoch)
    _LOGGER.info("Predicting...")
    results = model.predict(te_data,
                            test_callbacks=[mnt.ProgressIndicator()])
    _LOGGER.info("Prediction for image %d: %s.",
                 image_idx, str(results[image_idx]))

def cli(result_folder,
        model_name=None,
        epoch=None,
        num_epoch=150,
        optimizer_name='sgd',
        lr_param=0.1,
        lr_decay_sched='90,135',
        lr_decay_ratio=0.1,
        mom_param=0.9,
        wd_param=0.0001,
        monitor=False,
        allow_overwrite=False,
        use_cpu=False):
    """Train a model."""
    print("Parameters: ", sys.argv)
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    from data import _MEAN
    # Setup the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder, tr_data.shape[0], model_name, epoch, 10,
        optimizer_name, lr_param, lr_decay_sched, lr_decay_ratio,
        mom_param, wd_param, False, allow_overwrite)
    batch_size = model.blobs['data'].shape[0]
    logger = mnt.JSONLogger(str(out_folder),
                            'model',
                            {'train': ['train_loss', 'train_accuracy'],
                             'test': ['test_loss', 'test_accuracy']},
                            base_iter=base_iter,
                            write_every=round_to_mbsize(10000, batch_size),
                            create_plot=monitor)
    progr_ind = mnt.ProgressIndicator()
    cropper = RandCropMonitor('data', _MEAN)
    if monitor:
        extra_monitors = [
            mnt.ActivationMonitor(round_to_mbsize(10000, batch_size),
                                  os.path.join(str(out_folder),
                                               'visualizations' + os.sep),
                                  selected_blobs=['resblock3_out', 'avpool'],
                                  sample={'data': tr_data[0]}),
            mnt.FilterMonitor(round_to_mbsize(10000, batch_size),
                              os.path.join(str(out_folder),
                                           'visualizations' + os.sep),
                              selected_parameters={'resblock1_conv1': [0],
                                                   'resblock3_conv1': [0],
                                                   'resblock7_conv1': [0]}),
            mnt.GradientMonitor(round_to_mbsize(10000, batch_size),
                                os.path.join(str(out_folder),
                                             'visualizations' + os.sep),
                                relative=True,
                                selected_parameters={'resblock1_conv1': [0, 1],
                                                     'resblock3_conv1': [0, 1],
                                                     'resblock7_conv1': [0, 1]}),
        ]
    else:
        extra_monitors = []
    model.fit(round_to_mbsize(num_epoch * 50000, batch_size),
              optimizer,
              X={'data': tr_data, 'labels': tr_labels},
              X_val={'data': te_data, 'labels': te_labels},
              test_interval=round_to_mbsize(50000, batch_size),
              train_callbacks=[
                  progr_ind,
                  logger,
                  mnt.RotatingMirroringMonitor({'data': 0}, 0, 0.5),
                  cropper,
                  mnt.Checkpointer(os.path.join(str(out_folder), 'model'),
                                   round_to_mbsize(50000 * 10, batch_size),
                                   base_iterations=base_iter),
              ] + extra_monitors,
              test_callbacks=[progr_ind, cropper, logger],
              shuffle=True)
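
# `round_to_mbsize` is a helper imported from elsewhere in this project and
# its exact behaviour is not shown in this excerpt. The sketch below is an
# assumption: it rounds a sample count up to the next multiple of the batch
# size, so that the intervals passed to `fit` align with whole minibatches.
def round_to_mbsize_sketch(value, batch_size):
    """Round `value` up to the nearest multiple of `batch_size` (assumed)."""
    if value % batch_size == 0:
        return value
    return value + batch_size - value % batch_size

# Example: with a batch size of 128, a 50000-sample epoch becomes 50048,
# i.e. 391 full minibatches.
# assert round_to_mbsize_sketch(50000, 128) == 50048
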

def cli(result_folder,
        model_name=None,
        epoch=None,
        num_epoch=3,
        optimizer_name='sgd',
        lr_param=0.001,
        lr_decay_sched='90,135',
        lr_decay_ratio=0.1,
        mom_param=0.9,
        wd_param=0.0001,
        monitor=False,
        allow_overwrite=False,
        use_cpu=False):
    """Train a model."""
    print("Parameters: ", sys.argv)
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    # Setup the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder, tr_data.shape[0], model_name, epoch, 1,
        optimizer_name, lr_param, lr_decay_sched, lr_decay_ratio,
        mom_param, wd_param, False, allow_overwrite)
    batch_size = model.blobs['data'].shape[0]
    logger = mnt.JSONLogger(str(out_folder),
                            'model',
                            {'train': ['train_loss', 'train_accuracy'],
                             'test': ['test_loss', 'test_accuracy']},
                            base_iter=base_iter,
                            write_every=round_to_mbsize(50000, batch_size),
                            create_plot=monitor)
    progr_ind = mnt.ProgressIndicator()
    if monitor:
        extra_monitors = [
            mnt.ActivationMonitor(round_to_mbsize(10000, batch_size),
                                  os.path.join(str(out_folder),
                                               'visualizations' + os.sep),
                                  sample={'data': tr_data[0]}),
            mnt.FilterMonitor(round_to_mbsize(10000, batch_size),
                              os.path.join(str(out_folder),
                                           'visualizations' + os.sep)),
            mnt.GradientMonitor(round_to_mbsize(10000, batch_size),
                                os.path.join(str(out_folder),
                                             'visualizations' + os.sep),
                                relative=True),
        ]
    else:
        extra_monitors = []
    model.fit(round_to_mbsize(num_epoch * tr_data.shape[0], batch_size),
              optimizer,
              X={'data': tr_data, 'labels': tr_labels},
              X_val={'data': te_data, 'labels': te_labels},
              test_interval=round_to_mbsize(tr_data.shape[0], batch_size),
              train_callbacks=[
                  progr_ind,
                  logger,
                  mnt.Checkpointer(os.path.join(str(out_folder), 'model'),
                                   round_to_mbsize(tr_data.shape[0], batch_size),
                                   base_iterations=base_iter),
              ] + extra_monitors,
              test_callbacks=[progr_ind, logger])
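
# How these entry points (`cli`, `score`, `test_image`) are wired to the
# command line is not shown in this excerpt, and this section collects
# variants of the same functions from what appear to be different example
# scripts. The block below is a minimal, hypothetical dispatcher using only
# the standard library; the subcommand names mirror the functions above and
# everything else about it is an assumption, not the project's own launcher.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description="Train or evaluate a model.")
    subparsers = parser.add_subparsers(dest='command', required=True)
    for name in ('train', 'score', 'test_image'):
        sub = subparsers.add_parser(name)
        sub.add_argument('result_folder')
        sub.add_argument('--epoch', type=int, default=None)
        sub.add_argument('--use_cpu', action='store_true')
    args = parser.parse_args()

    if args.command == 'train':
        cli(args.result_folder, epoch=args.epoch, use_cpu=args.use_cpu)
    elif args.command == 'score':
        score(args.result_folder, epoch=args.epoch, use_cpu=args.use_cpu)
    else:
        test_image(args.result_folder, epoch=args.epoch, use_cpu=args.use_cpu)
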