def test_set_augmentation(self):
    """Check that set_augmentation() parses a '+'-separated string into a set.

    Loads the shapes fixture, builds a small 2-D U-Net model, then verifies
    that each augmentation string is split on '+' and stored on
    ``session.data.augmentation`` as an order-independent set.
    """
    img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
    label_path = os.path.join(base_path, '../test_data/shapes/labels.ilp')
    session = Session()
    session.load_training_data(img_path, label_path)
    session.make_model('unet_2d', (1, 572, 572))
    # Each (input string, expected parsed set) pair exercised in sequence,
    # matching the original three sequential assertions.
    cases = (
        ('flip+rotate', {'rotate', 'flip'}),
        ('flip+rotate+shear', {'rotate', 'flip', 'shear'}),
        ('flip', {'flip'}),
    )
    for aug_string, expected in cases:
        session.set_augmentation(aug_string)
        self.assertEqual(session.data.augmentation, expected)
def main(args):
    """Command-line entry point: dispatch to deploy, train, or predict.

    Args:
        args: docopt-style dict of parsed command-line options
              (keys like '--cpu', '<image_path>', 'train', ...).

    Side effects:
        May set CUDA_VISIBLE_DEVICES, read/write image and model files,
        and run training or prediction via a Session.
    """
    if args['--cpu']:
        # deactivate gpu for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if args['--gpu']:
        # define gpu hardware
        os.environ['CUDA_VISIBLE_DEVICES'] = args['--gpu']

    image_path = os.path.abspath(os.path.expanduser(args['<image_path>']))
    model_name = args['<network>']

    if args['deploy']:
        output_path = args['<output_path>']
        predict_example_image = not args['--skip-predict']
        # Message added for consistency with the predict branch below.
        assert os.path.isfile(model_name), '<network> must be a h5 model file'
        exp = DeepimagejExporter(model_name, output_path, image_path)
        exp.export_as_deepimagej(author=args['--author'],
                                 version=args['--version'],
                                 url=args['--url'],
                                 credit=args['--credit'],
                                 reference=args['--reference'],
                                 size=args['--size'],
                                 applymodel=predict_example_image)
        return

    norm_string = args['--normalize']
    s = Session()

    if args['train']:
        label_path = os.path.abspath(os.path.expanduser(args['<label_path>']))
        aug_string = args['--augment']
        max_epochs = int(args['--epochs'])
        steps_per_epoch = int(args['--steps'])
        log_filename = args['--csvfile']
        model_export_filename = args['--file']
        valfraction = float(args['--valfraction'])
        batch_size = int(args['--batchsize']) if args['--batchsize'] else None
        s.load_training_data(image_path, label_path, batch_size)

        models_available = ['unet_2d', 'unet_multi_z', 'convnet_for_unittest']
        if os.path.isfile(model_name):
            s.load_model(model_name)
        elif model_name in models_available:
            # Model geometry is (z-slices, height, width).
            size_xy = 572
            size_z = 1  # default: single-slice 2-D models
            if model_name == 'unet_multi_z':
                size_z = 5
            if model_name == 'convnet_for_unittest':
                size_xy = 100
            s.make_model(model_name, (size_z, size_xy, size_xy))
        else:
            # Fail fast with a clear message instead of crashing later
            # in training with no model defined.
            raise ValueError(
                '<network> must be an existing model file or one of '
                '{}'.format(models_available))

        s.set_normalization(norm_string)
        s.set_augmentation(aug_string)
        if valfraction > 0:
            s.define_validation_data(valfraction)
        s.train(max_epochs=max_epochs,
                steps_per_epoch=steps_per_epoch,
                log_filename=log_filename,
                model_filename=model_export_filename)

        # Persist the label map alongside the exported model; a context
        # manager guarantees the h5 handle is closed even if the write
        # fails (the original left the file open).
        lbl_map_list = [[key, val] for key, val in s.lbl_map.items()]
        with h5py.File(model_export_filename, 'r+') as f:
            f.create_dataset('lbl_map', data=lbl_map_list)

    if args['predict']:
        output_path = args['<output_path>']
        assert os.path.isfile(model_name), '<network> must be a h5 model file'
        multichannel = not (args['--split'])
        s.load_prediction_data(image_path, output_path)
        s.load_model(model_name)
        s.set_normalization(norm_string)
        s.predict(multichannel=multichannel)
def main(args):
    """Command-line entry point: run training and/or prediction via a Session.

    Args:
        args: docopt-style dict of parsed command-line options
              (keys like '--cpu', '<image_path>', 'train', 'predict', ...).
    """
    session = Session()
    if args['--cpu']:
        # deactivate gpu for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if args['--gpu']:
        # define gpu hardware
        os.environ['CUDA_VISIBLE_DEVICES'] = args['--gpu']

    image_path = os.path.abspath(os.path.expanduser(args['<image_path>']))
    model_name = args['<network>']
    norm_string = args['--normalize']

    if args['train']:
        label_path = os.path.abspath(os.path.expanduser(args['<label_path>']))
        aug_string = args['--augment']
        max_epochs = int(args['--epochs'])
        steps_per_epoch = int(args['--steps'])
        log_filename = args['--csvfile']
        model_export_filename = args['--file']
        valfraction = float(args['--valfraction'])
        session.load_training_data(image_path, label_path)

        models_available = ['unet_2d', 'unet_multi_z', 'convnet_for_unittest']
        if os.path.isfile(model_name):
            session.load_model(model_name)
        elif model_name in models_available:
            # Model geometry is (z-slices, height, width).
            size_xy = 572
            if model_name in ('unet_2d', 'convnet_for_unittest'):
                size_z = 1
            if model_name == 'unet_multi_z':
                size_z = 5
            if model_name == 'convnet_for_unittest':
                size_xy = 100
            session.make_model(model_name, (size_z, size_xy, size_xy))

        session.set_normalization(norm_string)
        session.set_augmentation(aug_string)
        if valfraction > 0:
            session.define_validation_data(valfraction)
        session.train(max_epochs=max_epochs,
                      steps_per_epoch=steps_per_epoch,
                      log_filename=log_filename,
                      model_filename=model_export_filename)

    if args['predict']:
        output_path = args['<output_path>']
        assert os.path.isfile(model_name), '<network> must be a h5 model file'
        session.load_prediction_data(image_path, output_path)
        session.load_model(model_name)
        session.set_normalization(norm_string)
        session.predict()