Code example #1
    def gen_data(self):

        # load init_args parameters for the dataset loader:
        init_args = self.init_args_dict()

        batch_size = self.batch_size

        # generate datasets for train/validation
        datagen = NeMOImageGenerator(image_shape=self.input_shape,
                                     image_resample=True,
                                     pixelwise_center=True,
                                     pixel_mean=self.pixel_mean,
                                     pixelwise_std_normalization=True,
                                     pixel_std=self.pixel_std)

        train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
        val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

        # generate model data

        self.train_generator = datagen.flow_from_imageset(
            class_mode='categorical',
            classes=self.num_classes,
            batch_size=batch_size,
            shuffle=True,
            image_set_loader=train_loader)

        self.validation_generator = datagen.flow_from_imageset(
            class_mode='categorical',
            classes=self.num_classes,
            batch_size=batch_size,
            shuffle=True,
            image_set_loader=val_loader)

        return self.train_generator, self.validation_generator
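
As a usage sketch that is not part of the original snippet: assuming `trainer` is an instance of the class that defines gen_data() above, and `model` is an already-compiled Keras model, the two returned generators can be passed straight to fit_generator, mirroring the fit_generator call in code example #3. The step counts are illustrative placeholders.

# Hypothetical usage sketch; `trainer` and `model` are assumed names, not from the original code.
train_gen, val_gen = trainer.gen_data()

model.fit_generator(
    train_gen,
    steps_per_epoch=80,        # placeholder; code example #3 uses 80 train / 20 validation steps
    epochs=2,
    validation_data=val_gen,
    validation_steps=20,
    verbose=1)
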
Code example #2
    def gen_data(self):

        # load init_args parameters for the dataset loader:
        init_args = self.init_args_dict()

        # generate datasets for train/validation
        self.datagen = NeMOImageGenerator(image_shape=self.input_shape,
                                          image_resample=True,
                                          pixelwise_center=True,
                                          pixel_mean=self.pixel_mean,
                                          pixelwise_std_normalization=True,
                                          pixel_std=self.pixel_std)

        self.train_loader = ImageSetLoader(
            **init_args['image_set_loader']['train'])
        self.val_loader = ImageSetLoader(
            **init_args['image_set_loader']['val'])

        return self.datagen, self.train_loader, self.val_loader
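
This second variant returns the generator factory and the two loaders instead of ready-made generators. Below is a minimal wiring sketch built from the flow_from_imageset calls shown elsewhere on this page; `trainer` is a hypothetical instance name, and num_classes/batch_size are assumed to be attributes of the same object, as in code example #1.

# Hypothetical usage sketch; `trainer` is an assumed name, not from the original code.
datagen, train_loader, val_loader = trainer.gen_data()

train_generator = datagen.flow_from_imageset(
    class_mode='categorical',
    classes=trainer.num_classes,
    batch_size=trainer.batch_size,
    shuffle=True,
    image_set_loader=train_loader)

validation_generator = datagen.flow_from_imageset(
    class_mode='categorical',
    classes=trainer.num_classes,
    batch_size=trainer.batch_size,
    shuffle=True,
    image_set_loader=val_loader)
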
Code example #3
    def NeMO_FCN_vgg16(self, image_shape=image_shape, weight_decay=weight_decay, lr=lr):

        os.environ["CUDA_VISIBLE_DEVICES"] = "0"

        global _SESSION
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        _SESSION = tf.Session(config=config)
        K.set_session(_SESSION)


        with open("init_args.yml", 'r') as stream:
            try:
                init_args = yaml.load(stream)
            except yaml.YAMLError as exc:
                print(exc)

        checkpointer = ModelCheckpoint(filepath="./tmp/fcn_vgg16_weights.h5", verbose=1, save_best_only=True)
        lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                       factor=np.sqrt(0.1),
                                       cooldown=0,
                                       patience=10, min_lr=1e-12)
        early_stopper = EarlyStopping(monitor='val_loss',
                                      min_delta=0.001,
                                      patience=30)
        nan_terminator = TerminateOnNaN()
        SaveWeights = WeightsSaver(filepath='./weights/', N=10)
        #csv_logger = CSVLogger('output/tmp_fcn_vgg16.csv')
             #'output/{}_fcn_vgg16.csv'.format(datetime.datetime.now().isoformat()))

        #check_num = CheckNumericsOps(validation_data=[np.random.random((1, 224, 224, 3)), 1],
        #                             histogram_freq=100)

        # log history during model fit
        csv_logger = CSVLogger('output/log.csv', append=True, separator=';')

        datagen = NeMOImageGenerator(image_shape=[100, 100, 3],
                                     image_resample=True,
                                     pixelwise_center=True,
                                     pixel_mean=[127.5, 127.5, 127.5],
                                     pixelwise_std_normalization=True,
                                     pixel_std=[127.5, 127.5, 127.5])

        train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
        val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

        fcn_vgg16 = FCN(input_shape=(100, 100, 3), classes=4, weight_decay=3e-3,
                        weights='imagenet', trainable_encoder=True)
        optimizer = keras.optimizers.Adam(1e-4)

        fcn_vgg16.compile(optimizer=optimizer,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

        fcn_vgg16.fit_generator(
            datagen.flow_from_imageset(
                class_mode='categorical',
                classes=4,
                batch_size=10,
                shuffle=True,
                image_set_loader=train_loader),
            steps_per_epoch=80,
            epochs=2,
            validation_data=datagen.flow_from_imageset(
                class_mode='categorical',
                classes=4,
                batch_size=4,
                shuffle=True,
                image_set_loader=val_loader),
            validation_steps=20,
            verbose=1,
            callbacks=[lr_reducer, early_stopper, nan_terminator, checkpointer, csv_logger, SaveWeights])
Code example #4
    def gen_data(self):
        # generate train set

init_args_type <class 'dict'>
{'image_set_loader': {
    'val': {'image_format': 'png',
            'image_dir': '../Images/Valid_Patches/',
            'label_format': 'png',
            'image_set': '../Images/Valid_Patches/NeMO_valid.txt',
            'target_size': [100, 100],
            'label_dir': '../Images/ValidRef_Patches/',
            'color_mode': 'rgb'},
    'test': {'image_format': 'jpg',
             'image_dir': '../data/VOC2011/JPEGImages/',
             'label_format': 'png',
             'image_set': '../data/VOC2011/ImageSets/Segmentation/test.txt',
             'target_size': [224, 224],
             'label_dir': '../data/VOC2011/SegmentationClass',
             'color_mode': 'rgb'},
    'train': {'image_format': 'png',
              'image_dir': '../Images/Training_Patches/',
              'label_format': 'png',
              'image_set': '../Images/Training_Patches/NeMO_train.txt',
              'target_size': [100, 100],
              'label_dir': '../Images/TrainingRef_Patches/',
              'color_mode': 'rgb'}}}
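
The dict printed above is what the yaml.load call on "init_args.yml" in the following examples produces. As a minimal sketch of how such a file could be (re)generated with PyYAML, here are the 'train' and 'val' entries written back out with yaml.safe_dump; the paths are the ones shown above and would have to match your own directory layout.

import yaml

# Same nested structure as the printed init_args dict (only 'train' and 'val' shown here).
init_args = {
    'image_set_loader': {
        'train': {'image_format': 'png',
                  'image_dir': '../Images/Training_Patches/',
                  'label_format': 'png',
                  'image_set': '../Images/Training_Patches/NeMO_train.txt',
                  'target_size': [100, 100],
                  'label_dir': '../Images/TrainingRef_Patches/',
                  'color_mode': 'rgb'},
        'val': {'image_format': 'png',
                'image_dir': '../Images/Valid_Patches/',
                'label_format': 'png',
                'image_set': '../Images/Valid_Patches/NeMO_valid.txt',
                'target_size': [100, 100],
                'label_dir': '../Images/ValidRef_Patches/',
                'color_mode': 'rgb'}}}

with open('init_args.yml', 'w') as stream:
    yaml.safe_dump(init_args, stream, default_flow_style=False)
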

#### Optimize on a NeMO_FCN vgg16-based model
# Inputs:
#   exporttrainpath: Directory for exported patch images
#   exportlabelpath: Directory for exported segmented images
#   txtfilename: Name of the text file that records image names (remember to include '.txt')
#   image_shape: (row x col x channel)
#   image_size: Size of the image patch to use in optimization (for symmetric images)
#   N: Number of images per class (NOTE: because these are segmented maps, the class is affiliated only with the center pixel)
#   lastchannelremove: Whether or not to remove the last channel
#   labelkey: Naming convention of the class labels (NOTE: must match the number of classes)

    def NeMO_FCN_vgg16(self, image_shape=image_shape, weight_decay=weight_decay, lr=lr):

        os.environ["CUDA_VISIBLE_DEVICES"] = "0"

        global _SESSION
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        _SESSION = tf.Session(config=config)
        K.set_session(_SESSION)


        with open("init_args.yml", 'r') as stream:
            try:
                init_args = yaml.load(stream)
            except yaml.YAMLError as exc:
                print(exc)

        checkpointer = ModelCheckpoint(filepath="./tmp/fcn_vgg16_weights.h5", verbose=1, save_best_only=True)
        lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                       factor=np.sqrt(0.1),
                                       cooldown=0,
                                       patience=10, min_lr=1e-12)
        early_stopper = EarlyStopping(monitor='val_loss',
                                      min_delta=0.001,
                                      patience=30)
        nan_terminator = TerminateOnNaN()
        SaveWeights = WeightsSaver(filepath='./weights/', N=10)
        #csv_logger = CSVLogger('output/tmp_fcn_vgg16.csv')
             #'output/{}_fcn_vgg16.csv'.format(datetime.datetime.now().isoformat()))

        #check_num = CheckNumericsOps(validation_data=[np.random.random((1, 224, 224, 3)), 1],
        #                             histogram_freq=100)

        # log history during model fit
        csv_logger = CSVLogger('output/log.csv', append=True, separator=';')

        datagen = NeMOImageGenerator(image_shape=[100, 100, 3],
                                     image_resample=True,
                                     pixelwise_center=True,
                                     pixel_mean=[127.5, 127.5, 127.5],
                                     pixelwise_std_normalization=True,
                                     pixel_std=[127.5, 127.5, 127.5])

        train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
        val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

        fcn_vgg16 = FCN(input_shape=(100, 100, 3), classes=4, weight_decay=3e-3,
                        weights='imagenet', trainable_encoder=True)
        optimizer = keras.optimizers.Adam(1e-4)

        fcn_vgg16.compile(optimizer=optimizer,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

        fcn_vgg16.fit_generator(
            datagen.flow_from_imageset(
                class_mode='categorical',
                classes=4,
                batch_size=10,
                shuffle=True,
                image_set_loader=train_loader),
            steps_per_epoch=80,
            epochs=2,
            validation_data=datagen.flow_from_imageset(
                class_mode='categorical',
                classes=4,
                batch_size=4,
                shuffle=True,
                image_set_loader=val_loader),
            validation_steps=20,
            verbose=1,
            callbacks=[lr_reducer, early_stopper, nan_terminator, checkpointer, csv_logger, SaveWeights])
Code example #5
def val_loader():
    print(init_args['image_set_loader']['val'])
    return ImageSetLoader(**init_args['image_set_loader']['val'])
Code example #6
def NeMOtrain_loader():
    return ImageSetLoader(**init_args['image_set_loader']['train'])
Code example #7
File: NeMO_FCN.py  Project: NASA-NeMO-Net/NeMO-Net
    #'output/{}_fcn_vgg16.csv'.format(datetime.datetime.now().isoformat()))

#check_num = CheckNumericsOps(validation_data=[np.random.random((1, 224, 224, 3)), 1],
#                             histogram_freq=100)

# log history during model fit
csv_logger = CSVLogger('output/log.csv', append=True, separator=';')

datagen = NeMOImageGenerator(image_shape=[image_size, image_size, 3],
                             image_resample=True,
                             pixelwise_center=True,
                             pixel_mean=[127.5, 127.5, 127.5],
                             pixelwise_std_normalization=True,
                             pixel_std=[127.5, 127.5, 127.5])

train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

fcn_vgg16 = FCN(input_shape=(image_size, image_size, 3), classes=4, weight_decay=3e-3,
                weights='imagenet', trainable_encoder=True)
optimizer = keras.optimizers.Adam(1e-4)

fcn_vgg16.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

fcn_vgg16.fit_generator(
    datagen.flow_from_imageset(
        class_mode='categorical',
        classes=4,
        batch_size=20,