Example No. 1
0
    def test_load_model(self):
        """Train a model, save it, then verify that a fresh session can load
        the saved model and continue training from it."""

        # force CPU execution for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
        label_path = os.path.join(base_path, '../test_data/shapes/labels.ilp')
        savepath = os.path.join(base_path, '../test_data/tmp')

        os.makedirs(savepath, exist_ok=True)

        # train an initial model and export it to disk
        first = Session()
        first.load_training_data(img_path, label_path)
        first.make_model('convnet_for_unittest', (1, 100, 100))
        first.train(max_epochs=3,
                    steps_per_epoch=2,
                    log_filename=os.path.join(savepath, 'log.csv'),
                    model_filename=os.path.join(savepath, 'model.h5'))

        # a new session starts without a model and picks up the saved one
        second = Session()
        second.load_training_data(img_path, label_path)

        assert second.model is None
        second.load_model(os.path.join(savepath, 'model.h5'))
        assert second.model is not None

        # resume training from the loaded weights
        second.train(max_epochs=3,
                     steps_per_epoch=2,
                     log_filename=os.path.join(savepath, 'log.csv'),
                     model_filename=os.path.join(savepath,
                                                 'model_continued.h5'))
Example No. 2
0
    def test_shape_data_fast(self):
        """Train briefly on the shapes dataset, run prediction, and check
        that the csv log plus all six class prediction tifs were written."""

        # train a classifier and predict training data
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'

        img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
        label_path = os.path.join(base_path, '../test_data/shapes/labels.ilp')
        savepath = os.path.join(base_path, '../test_data/tmp')

        os.makedirs(savepath, exist_ok=True)

        session = Session()
        session.load_training_data(img_path, label_path)
        session.make_model('convnet_for_unittest', (1, 100, 100))

        session.train(max_epochs=3,
                      steps_per_epoch=4,
                      log_filename=os.path.join(savepath, 'log.csv'),
                      model_filename=os.path.join(savepath, 'model.h5'))
        session.load_prediction_data(img_path, savepath)
        session.predict()

        # one tif per (image, class) pair is expected alongside the log
        expected = ['log.csv'] + [
            'pixels_{}_class_{}.tif'.format(image_nr, class_nr)
            for image_nr in (1, 2)
            for class_nr in (1, 2, 3)]

        artifacts = os.listdir(savepath)
        for filename in expected:
            assert filename in artifacts

        shutil.rmtree(savepath)
Example No. 3
0
    def test_set_augmentation(self):
        """set_augmentation parses a '+'-separated string into a set of
        augmentation names on the session's data object."""

        img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
        label_path = os.path.join(base_path, '../test_data/shapes/labels.ilp')

        session = Session()
        session.load_training_data(img_path, label_path)
        session.make_model('unet_2d', (1, 572, 572))

        # each call replaces the previous augmentation set entirely
        cases = (('flip+rotate', {'rotate', 'flip'}),
                 ('flip+rotate+shear', {'rotate', 'flip', 'shear'}),
                 ('flip', {'flip'}))
        for aug_string, expected in cases:
            session.set_augmentation(aug_string)
            self.assertEqual(session.data.augmentation, expected)
Example No. 4
0
    def test_set_normalization(self):
        """set_normalization configures the normalization mode and, for the
        global variant, the fixed min/max range."""

        img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
        label_path = os.path.join(base_path, '../test_data/shapes/labels.ilp')

        session = Session()
        session.load_training_data(img_path, label_path)
        session.make_model('unet_2d', (1, 572, 572))

        # 'local' mode: per-image normalization, no global range stored
        session.set_normalization('local')
        assert session.data.normalize_mode == 'local'
        assert session.data.global_norm_minmax is None

        # 'global_0+255' mode: fixed global range of (0, 255)
        session.set_normalization('global_0+255')
        assert session.data.normalize_mode == 'global'
        assert session.data.global_norm_minmax == (0, 255)
Example No. 5
0
def train_test_model_convnet():
    """Train a small convnet fixture model and return the path to its h5 file.

    The model is cached under ``test_data/tmp``: when the file already exists,
    training is skipped and the cached path is returned directly.
    """
    img_path = os.path.join(base_path, '../test_data/shapes/pixels/*')
    label_path = os.path.join(base_path, '../test_data/shapes/labels.h5')
    savepath = os.path.join(base_path, '../test_data/tmp')
    model_filename = os.path.join(savepath, 'model_convnet.h5')

    # only train when no cached fixture model is present
    if not os.path.isfile(model_filename):
        os.makedirs(savepath, exist_ok=True)

        session = Session()
        session.load_training_data(img_path, label_path)
        session.make_model('convnet_for_unittest', (1, 100, 100))
        session.train(max_epochs=2,
                      steps_per_epoch=2,
                      log_filename=os.path.join(savepath, 'log.csv'),
                      model_filename=model_filename)

    return model_filename
Example No. 6
0
def train_test_model_unet_2d_3channels_2classes():
    """Train a unet_2d fixture on RGB images with two label classes and
    return the path to its h5 file.

    Like the other fixture builders, the trained model is cached under
    ``test_data/tmp`` and reused on subsequent calls.
    """
    img_path = os.path.join(base_path, '../test_data/shapes/pixels_rgb/*')
    label_path = os.path.join(base_path,
                              '../test_data/shapes/labels_2classes.h5')
    savepath = os.path.join(base_path, '../test_data/tmp')
    model_filename = os.path.join(savepath,
                                  'model_unet_2d_3channels_2classes.h5')

    # only train when no cached fixture model is present
    if not os.path.isfile(model_filename):
        os.makedirs(savepath, exist_ok=True)

        session = Session()
        session.load_training_data(img_path, label_path)
        session.make_model('unet_2d', (1, 220, 220))
        session.train(max_epochs=2,
                      steps_per_epoch=2,
                      log_filename=os.path.join(savepath, 'log.csv'),
                      model_filename=model_filename)

    return model_filename
Example No. 7
0
    def test_shape_data(self):
        """Train a classifier on the shapes dataset, predict the training
        images, and print pixel accuracy against the validation masks.

        NOTE(review): accuracies are only printed, never asserted — this
        test passes as long as training and prediction run to completion.
        """

        # train a classifier and predict training data
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'

        img_path = os.path.join(
            base_path,
            '../test_data/shapes/pixels/*')
        label_path = os.path.join(
            base_path,
            '../test_data/shapes/labels.h5')
        savepath = os.path.join(
            base_path,
            '../test_data/tmp')

        os.makedirs(savepath, exist_ok=True)

        t = Session()
        t.load_training_data(img_path, label_path)

        t.make_model('convnet_for_unittest', (1, 100, 100))

        t.train(max_epochs=50,
                steps_per_epoch=50,
                log_filename=os.path.join(savepath, 'log.csv'),
                model_filename=os.path.join(savepath, 'model.h5'))
        t.load_prediction_data(img_path, savepath)
        t.predict()

        # read prediction images and compare with validation data
        def read_images(image_nr, class_nr):
            """Return (prediction, validation) images for one image/class."""
            # map the class index to the shape name used by validation files
            if class_nr == 1:
                shape = 'circles'
            elif class_nr == 2:
                shape = 'triangles'
            else:
                # previously `shape` stayed unbound here and a later use
                # raised a confusing UnboundLocalError; fail explicitly
                raise ValueError(
                    'unexpected class_nr: {}'.format(class_nr))

            filename = os.path.join(
                savepath,
                'pixels_{}_class_{}.tif'.format(image_nr, class_nr))
            print(filename)
            prediction_img = np.squeeze(imread(filename))
            filename = os.path.join(
                savepath,
                '../shapes/val/{}_{}.tiff'.format(shape, image_nr))
            print(filename)
            val_img = np.squeeze(skimage.io.imread(filename))
            return prediction_img, val_img

        # mean predicted probability inside the ground-truth foreground,
        # for each (image, class) combination
        for image_nr, class_nr in ((1, 1), (1, 2), (2, 1), (2, 2)):
            prediction_img, val_img = read_images(image_nr, class_nr)
            accuracy = np.mean(prediction_img[val_img > 0])
            print(accuracy)

        shutil.rmtree(savepath)
Example No. 8
0
File: main.py Project: yapic/yapic
def main(args):
    """CLI entry point: dispatch to deploy, train, or predict based on the
    docopt-style ``args`` mapping.

    Side effects: sets CUDA_VISIBLE_DEVICES, reads/writes image, label and
    model files, and appends the label mapping to the exported h5 model.
    """

    if args['--cpu']:
        # deactivate gpu for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if args['--gpu']:
        # define gpu hardware
        os.environ['CUDA_VISIBLE_DEVICES'] = args['--gpu']

    image_path = os.path.abspath(os.path.expanduser(args['<image_path>']))
    model_name = args['<network>']

    if args['deploy']:
        output_path = args['<output_path>']
        predict_example_image = not args['--skip-predict']

        # deploy requires an existing trained h5 model file
        assert os.path.isfile(model_name)

        exp = DeepimagejExporter(model_name, output_path, image_path)
        exp.export_as_deepimagej(author=args['--author'],
                                 version=args['--version'],
                                 url=args['--url'],
                                 credit=args['--credit'],
                                 reference=args['--reference'],
                                 size=args['--size'],
                                 applymodel=predict_example_image)
        return

    norm_string = args['--normalize']
    s = Session()

    if args['train']:

        label_path = os.path.abspath(os.path.expanduser(args['<label_path>']))
        aug_string = args['--augment']
        max_epochs = int(args['--epochs'])
        steps_per_epoch = int(args['--steps'])
        log_filename = args['--csvfile']
        model_export_filename = args['--file']
        valfraction = float(args['--valfraction'])
        # fall back to the session's default batch size when not given
        batch_size = int(args['--batchsize']) if args['--batchsize'] else None

        s.load_training_data(image_path, label_path, batch_size)

        models_available = ['unet_2d', 'unet_multi_z', 'convnet_for_unittest']

        if os.path.isfile(model_name):
            # resume training from an existing model file on disk
            s.load_model(model_name)
        elif model_name in models_available:
            # per-architecture input geometry (z-slices, xy size)
            size_xy = 572
            if model_name == 'unet_2d' or model_name == 'convnet_for_unittest':
                size_z = 1
            if model_name == 'unet_multi_z':
                size_z = 5
            if model_name == 'convnet_for_unittest':
                size_xy = 100

            s.make_model(model_name, (size_z, size_xy, size_xy))

        s.set_normalization(norm_string)
        s.set_augmentation(aug_string)

        if valfraction > 0:
            s.define_validation_data(valfraction)

        s.train(max_epochs=max_epochs,
                steps_per_epoch=steps_per_epoch,
                log_filename=log_filename,
                model_filename=model_export_filename)

        # append the label mapping to the exported model; the original code
        # opened the h5 file without ever closing it — use a context manager
        # so the handle is flushed and released deterministically
        with h5py.File(model_export_filename, 'r+') as f:
            lbl_map_list = [[key, val] for key, val in s.lbl_map.items()]
            f.create_dataset('lbl_map', data=lbl_map_list)

    if args['predict']:
        output_path = args['<output_path>']
        assert os.path.isfile(model_name), '<network> must be a h5 model file'

        # --split writes one output image per class instead of multichannel
        multichannel = not (args['--split'])

        s.load_prediction_data(image_path, output_path)
        s.load_model(model_name)
        s.set_normalization(norm_string)
        s.predict(multichannel=multichannel)
Example No. 9
0
def main(args):
    """CLI entry point: select the compute device, then run training and/or
    prediction with a Session according to the docopt ``args`` mapping."""

    session = Session()

    # tensorflow device selection via environment variable
    if args['--cpu']:
        # deactivate gpu for tensorflow
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if args['--gpu']:
        # define gpu hardware
        os.environ['CUDA_VISIBLE_DEVICES'] = args['--gpu']

    image_path = os.path.abspath(os.path.expanduser(args['<image_path>']))
    model_name = args['<network>']
    norm_string = args['--normalize']

    if args['train']:
        label_path = os.path.abspath(os.path.expanduser(args['<label_path>']))
        aug_string = args['--augment']
        max_epochs = int(args['--epochs'])
        steps_per_epoch = int(args['--steps'])
        log_filename = args['--csvfile']
        model_export_filename = args['--file']
        valfraction = float(args['--valfraction'])

        session.load_training_data(image_path, label_path)

        models_available = ['unet_2d',
                            'unet_multi_z',
                            'convnet_for_unittest']

        if os.path.isfile(model_name):
            # resume from an existing h5 model file
            session.load_model(model_name)
        elif model_name in models_available:
            # per-architecture input geometry: unet_multi_z spans 5 z-slices,
            # the unit-test convnet uses a reduced 100 px xy window
            size_z = 5 if model_name == 'unet_multi_z' else 1
            size_xy = 100 if model_name == 'convnet_for_unittest' else 572

            session.make_model(model_name, (size_z, size_xy, size_xy))

        session.set_normalization(norm_string)
        session.set_augmentation(aug_string)

        if valfraction > 0:
            session.define_validation_data(valfraction)

        session.train(max_epochs=max_epochs,
                      steps_per_epoch=steps_per_epoch,
                      log_filename=log_filename,
                      model_filename=model_export_filename)

    if args['predict']:
        output_path = args['<output_path>']
        assert os.path.isfile(model_name), '<network> must be a h5 model file'
        session.load_prediction_data(image_path, output_path)
        session.load_model(model_name)
        session.set_normalization(norm_string)
        session.predict()