Example #1
    def test_shape_data_fast(self):

        # train a classifier and predict training data
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'

        img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
        label_path = os.path.join(base_path, '../test_data/shapes/labels.ilp')
        savepath = os.path.join(base_path, '../test_data/tmp')

        os.makedirs(savepath, exist_ok=True)

        t = Session()
        t.load_training_data(img_path, label_path)
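        # model input shape is (size_z, size_xy, size_xy); cf. the make_model
        # call in main.py below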
        t.make_model('convnet_for_unittest', (1, 100, 100))

        t.train(max_epochs=3,
                steps_per_epoch=4,
                log_filename=os.path.join(savepath, 'log.csv'),
                model_filename=os.path.join(savepath, 'model.h5'))
        t.load_prediction_data(img_path, savepath)
        t.predict()

        artifacts = os.listdir(savepath)
        assert 'log.csv' in artifacts
        assert 'pixels_1_class_1.tif' in artifacts
        assert 'pixels_1_class_2.tif' in artifacts
        assert 'pixels_1_class_3.tif' in artifacts
        assert 'pixels_2_class_1.tif' in artifacts
        assert 'pixels_2_class_2.tif' in artifacts
        assert 'pixels_2_class_3.tif' in artifacts

        shutil.rmtree(savepath)
Example #2
File: test_session.py Project: yapic/yapic
    def test_shape_data(self):

        # train a classifier and predict training data
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'

        img_path = os.path.join(
            base_path,
            '../test_data/shapes/pixels/*')
        label_path = os.path.join(
            base_path,
            '../test_data/shapes/labels.h5')
        savepath = os.path.join(
            base_path,
            '../test_data/tmp')

        os.makedirs(savepath, exist_ok=True)

        t = Session()
        t.load_training_data(img_path, label_path)

        t.make_model('convnet_for_unittest', (1, 100, 100))

        t.train(max_epochs=50,
                steps_per_epoch=50,
                log_filename=os.path.join(savepath, 'log.csv'),
                model_filename=os.path.join(savepath, 'model.h5'))
        t.load_prediction_data(img_path, savepath)
        t.predict()

        # read prediction images and compare with validation data
        def read_images(image_nr, class_nr):
            if class_nr == 1:
                shape = 'circles'
            elif class_nr == 2:
                shape = 'triangles'

            filename = os.path.join(
                                savepath,
                                'pixels_{}_class_{}.tif'.format(image_nr,
                                                                class_nr))
            print(filename)
            prediction_img = np.squeeze(imread(filename))
            filename = os.path.join(
                                    savepath,
                                    '../shapes/val/{}_{}.tiff'.format(
                                        shape,
                                        image_nr))
            print(filename)
            val_img = np.squeeze(skimage.io.imread(filename))
            return prediction_img, val_img

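        # accuracy proxy: mean prediction value within the labeled region
        # (val_img > 0) of the corresponding validation image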
        prediction_img, val_img = read_images(1, 1)
        accuracy = np.mean(prediction_img[val_img > 0][:])
        print(accuracy)

        prediction_img, val_img = read_images(1, 2)
        accuracy = np.mean(prediction_img[val_img > 0][:])
        print(accuracy)

        prediction_img, val_img = read_images(2, 1)
        accuracy = np.mean(prediction_img[val_img > 0][:])
        print(accuracy)

        prediction_img, val_img = read_images(2, 2)
        accuracy = np.mean(prediction_img[val_img > 0][:])
        print(accuracy)

        shutil.rmtree(savepath)
Example #3
class DeepimagejExporter(object):
    '''
    A DeepImageJ exporter provides methods to deploy Keras models
    trained with YAPiC to the ImageJ plugin DeepImageJ.

    Parameters
    ----------
    model_path : string
        Path to Keras model in h5 format.
    save_path : string
        Path to the directory where the exported model and metadata are saved.
    example_image_path : string
        Path to an example input image in tif format.
    '''
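
    # Minimal usage sketch (illustrative only; paths and values are hypothetical):
    #
    #   exporter = DeepimagejExporter('model.h5', 'my_model', 'example.tif')
    #   exporter.export_as_deepimagej(author='Jane Doe', size='middle')
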
    def __init__(self, model_path, save_path, example_image_path):

        save_dir = os.path.dirname(save_path)
        if len(save_dir) == 0:
            save_dir = './'
        msg = '{} does not exist'.format(save_dir)
        assert os.path.isdir(save_dir), msg
        self.save_path = save_path

        self.s = Session()
        self.s.load_prediction_data(example_image_path, self.save_path)
        self.s.load_model(model_path)

        self.model_path = model_path
        self.example_image_path = example_image_path

        msg = 'model is not unet_2d, cannot be exported to deepimagej'
        assert self._is_model_unet_2d(), msg

        self.model_reshaped = None
        self.metadata = None

        self.template_dir = os.path.join(os.path.dirname(__file__),
                                         'templates/deepimagej101')

    def export_as_deepimagej(self,
                             author='n/a',
                             url='http://',
                             credit='n.a',
                             version='n.a',
                             reference='n/a',
                             size='small',
                             applymodel=True):

        self._reshape_unet_2d(size=size)
        self._update_metadata(author=author,
                              version=version,
                              url=url,
                              credit=credit,
                              reference=reference)
        self._export_as_tensorflow_model()
        self._format_xml()

        shutil.copyfile(os.path.join(self.template_dir, 'postprocessing.txt'),
                        os.path.join(self.save_path, 'postprocessing.txt'))
        shutil.copyfile(os.path.join(self.template_dir, 'preprocessing.txt'),
                        os.path.join(self.save_path, 'preprocessing.txt'))

        if applymodel:
            self.apply_model('local')
        shutil.copyfile(self.example_image_path,
                        os.path.join(self.save_path, 'exampleImage.tiff'))

    def apply_model(self, normalization_mode):

        self.s.set_normalization(normalization_mode)
        self.s.predict(multichannel=True)

        result_img_name = os.path.basename(self.example_image_path)
        save_path = os.path.join(self.save_path, result_img_name)
        new_save_path = save_path.replace(result_img_name, 'resultImage.tiff')
        os.rename(save_path, new_save_path)

    def _is_model_unet_2d(self):
        return self.s.model.name == 'unet_2d'
        # return self.s.model.count_params() == 32424323

    def _reshape_unet_2d(self, size='middle'):

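        # rebuild the U-Net at a fixed spatial input size and transfer the
        # trained weights from the loaded model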
        if size == 'small':
            shape_xy = 112
        elif size == 'middle':
            shape_xy = 224
        elif size == 'large':
            shape_xy = 368
        else:
            shape_xy = 112

        print('reshape to {}'.format(shape_xy))
        N_classes = self.s.model.output_shape[-1]
        N_channels = self.s.model.input_shape[-1]

        self.model_reshaped = unet_2d.build_network(
            N_classes, (N_channels, 1, shape_xy, shape_xy),
            squeeze=True,
            padding='same')

        self.model_reshaped.set_weights(self.s.model.get_weights())

    def _update_metadata(self,
                         author='n/a',
                         url='http://',
                         credit='n.a',
                         version='n.a',
                         reference='n/a'):

        if self.model_reshaped is None:
            return
        if self.metadata is None:
            self.metadata = {}

        self.metadata['name'] = os.path.basename(self.save_path)
        self.metadata['author'] = author
        self.metadata['url'] = url
        self.metadata['credit'] = credit
        self.metadata['version'] = version
        self.metadata['reference'] = reference

        date_format = '%a %b %d %H:%M:%S %Z %Y'
        self.metadata['date'] = time.strftime(date_format, time.localtime())

        N_channels = self.model_reshaped.input_shape[-1]
        size_xy = self.model_reshaped.input_shape[2]

        self.metadata['channels'] = N_channels
        self.metadata['input_tensor_dimensions'] = (-1, size_xy, size_xy,
                                                    N_channels)
        self.metadata['patch_size'] = size_xy
        self.metadata['padding'] = int(size_xy * 0.19)

        # metadata = {'name': 'my_model',
        #             'author': 'n/a',
        #             'url': 'http://',
        #             'credit': 'n/a',
        #             'version': 'n/a',
        #             'reference': 'n/a',
        #             'date': 'Tue Mar 31 17:18:06 CEST 2020',
        #             'test_image_size_xy': (512, 512),
        #             'input_tensor_dimensions': (-1, 112, 112, 3),
        #             'patch_size': (112),
        #             'padding': 10}

    def _format_xml(self):

        if self.metadata is None:
            return

        xml_path = os.path.join(self.template_dir, 'config.xml')

        tree = ET.parse(xml_path)

        key_mapping = (
            (('ModelInformation', 'Name'), 'name'),
            (('ModelInformation', 'Author'), 'author'),
            (('ModelInformation', 'URL'), 'url'),
            (('ModelInformation', 'Credit'), 'credit'),
            (('ModelInformation', 'Version'), 'version'),
            (('ModelInformation', 'Date'), 'date'),
            (('ModelInformation', 'Reference'), 'reference'),
            (('ModelCharacteristics', 'Channels'), 'channels'),
            (('ModelCharacteristics', 'InputTensorDimensions'),
             'input_tensor_dimensions'),
            (('ModelCharacteristics', 'PatchSize'), 'patch_size'),
            (('ModelCharacteristics', 'Padding'), 'padding'),
        )

        for item in key_mapping:
            value = str(self.metadata[item[1]])
            if item[1] == 'input_tensor_dimensions':
                value = value.replace('(', ',')\
                             .replace(')', ',')\
                             .replace(' ', '')
            tree.find(item[0][0]).find(item[0][1]).text = value

        save_path = os.path.join(self.save_path, 'config.xml')
        tree.write(save_path)

    def _export_as_tensorflow_model(self):

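        # export the reshaped Keras model as a TensorFlow 1.x SavedModel with
        # a default serving signature ('input' -> 'output')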
        model = self.model_reshaped
        builder = tf.saved_model.builder.SavedModelBuilder(self.save_path)

        signature = tf.saved_model.signature_def_utils.predict_signature_def(
            inputs={'input': model.input}, outputs={'output': model.output})

        signature_def_map = {
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature
        }

        builder.add_meta_graph_and_variables(
            K.get_session(), [tf.saved_model.tag_constants.SERVING],
            signature_def_map=signature_def_map)
        builder.save()
Example #4
File: main.py Project: yapic/yapic
def main(args):

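    # args: command-line arguments as a dict with docopt-style keys
    # (e.g. '--cpu', '<image_path>'); presumably parsed by docopt upstream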
    if args['--cpu']:
        # deactivate gpu for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if args['--gpu']:
        # define gpu hardware
        os.environ['CUDA_VISIBLE_DEVICES'] = args['--gpu']

    image_path = os.path.abspath(os.path.expanduser(args['<image_path>']))
    model_name = args['<network>']

    if args['deploy']:
        output_path = args['<output_path>']
        predict_example_image = not args['--skip-predict']

        assert os.path.isfile(model_name)

        exp = DeepimagejExporter(model_name, output_path, image_path)
        exp.export_as_deepimagej(author=args['--author'],
                                 version=args['--version'],
                                 url=args['--url'],
                                 credit=args['--credit'],
                                 reference=args['--reference'],
                                 size=args['--size'],
                                 applymodel=predict_example_image)
        return

    norm_string = args['--normalize']
    s = Session()

    if args['train']:

        label_path = os.path.abspath(os.path.expanduser(args['<label_path>']))
        aug_string = args['--augment']
        max_epochs = int(args['--epochs'])
        steps_per_epoch = int(args['--steps'])
        log_filename = args['--csvfile']
        model_export_filename = args['--file']
        valfraction = float(args['--valfraction'])
        if args['--batchsize']:
            batch_size = int(args['--batchsize'])
        else:
            batch_size = None

        s.load_training_data(image_path, label_path, batch_size)

        models_available = ['unet_2d', 'unet_multi_z', 'convnet_for_unittest']

        if os.path.isfile(model_name):
            s.load_model(model_name)
        elif model_name in models_available:
            size_xy = 572
            if model_name == 'unet_2d' or model_name == 'convnet_for_unittest':
                size_z = 1
            if model_name == 'unet_multi_z':
                size_z = 5
            if model_name == 'convnet_for_unittest':
                size_xy = 100

            s.make_model(model_name, (size_z, size_xy, size_xy))

        s.set_normalization(norm_string)
        s.set_augmentation(aug_string)

        if valfraction > 0:
            s.define_validation_data(valfraction)

        s.train(max_epochs=max_epochs,
                steps_per_epoch=steps_per_epoch,
                log_filename=log_filename,
                model_filename=model_export_filename)

        # store the label mapping in the exported model file; use a context
        # manager so the HDF5 file is closed properly
        with h5py.File(model_export_filename, 'r+') as f:
            lbl_map_list = [[key, val] for key, val in s.lbl_map.items()]
            f.create_dataset('lbl_map', data=lbl_map_list)

    if args['predict']:
        output_path = args['<output_path>']
        assert os.path.isfile(model_name), '<network> must be a h5 model file'

        multichannel = not (args['--split'])

        s.load_prediction_data(image_path, output_path)
        s.load_model(model_name)
        s.set_normalization(norm_string)
        s.predict(multichannel=multichannel)
Example #5
def main(args):

    s = Session()

    if args['--cpu']:
        # deactivate gpu for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if args['--gpu']:
        # define gpu hardware
        os.environ['CUDA_VISIBLE_DEVICES'] = args['--gpu']

    image_path = os.path.abspath(os.path.expanduser(args['<image_path>']))
    model_name = args['<network>']
    norm_string = args['--normalize']


    if args['train']:

        label_path = os.path.abspath(os.path.expanduser(args['<label_path>']))
        aug_string = args['--augment']
        max_epochs = int(args['--epochs'])
        steps_per_epoch = int(args['--steps'])
        log_filename = args['--csvfile']
        model_export_filename = args['--file']
        valfraction = float(args['--valfraction'])

        s.load_training_data(image_path, label_path)

        models_available = ['unet_2d',
                            'unet_multi_z',
                            'convnet_for_unittest']

        if os.path.isfile(model_name):
            s.load_model(model_name)
        elif model_name in models_available:
            size_xy = 572
            if model_name == 'unet_2d' or model_name == 'convnet_for_unittest':
                size_z = 1
            if model_name == 'unet_multi_z':
                size_z = 5
            if model_name == 'convnet_for_unittest':
                size_xy = 100

            s.make_model(model_name, (size_z, size_xy, size_xy))

        s.set_normalization(norm_string)
        s.set_augmentation(aug_string)

        if valfraction > 0:
            s.define_validation_data(valfraction)

        s.train(max_epochs=max_epochs,
                steps_per_epoch=steps_per_epoch,
                log_filename=log_filename,
                model_filename=model_export_filename)

    if args['predict']:
        output_path = args['<output_path>']
        assert os.path.isfile(model_name), '<network> must be a h5 model file'
        s.load_prediction_data(image_path, output_path)
        s.load_model(model_name)
        s.set_normalization(norm_string)
        s.predict()