Example #1
    def __init__(self, image_shape, numClasses, palette, pretrained_unet=None, pretrained_fcn=None, pretrained_segnet=None,
                 unet=False, fcn=False, segnet=False, channels=None):
        if channels is None:
            channels = [3, 3]

        # model selection
        self.unetchoice = unet
        self.fcnchoice = fcn
        self.segnetchoice = segnet

        # model parameters
        self.input_shape = (image_shape[0], image_shape[1], channels[0])
        self.num_of_classes = numClasses
        self.palette = palette
        if unet:
            if not pretrained_unet:
                raise ValueError("Need Weights to Evaluate Model")
            else:
                self.unet = UNET.build(self.input_shape, self.num_of_classes, pretrained_weights=pretrained_unet)

        if fcn:
            if not pretrained_fcn:
                raise ValueError("Need Weights to Evaluate Model")
            else:
                self.fcn = FCN.build(self.input_shape, self.num_of_classes, pretrained_weights=pretrained_fcn)

        if segnet:
            if not pretrained_segnet:
                raise ValueError("Need Weights to Evaluate Model")
            else:
                self.segnet = SegNet.build(self.input_shape, self.num_of_classes, pretrained_weights=pretrained_segnet)
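Only the constructor is shown above; the enclosing evaluation class is not part of the snippet. A minimal usage sketch (the class name Evaluate is an assumption; the 512x512 size, 29 classes, classes.csv palette and AdvUNET.h5 weights are taken from the later examples on this page):

# hypothetical usage -- the class name Evaluate is assumed; sizes and file names follow the later examples
import numpy as np
import pandas as pd

df = pd.read_csv('classes.csv', sep=",", header=None)
palette = np.array(df.values, dtype=np.float32)

evaluator = Evaluate(image_shape=(512, 512),
                     numClasses=29,
                     palette=palette,
                     pretrained_unet='AdvUNET.h5',
                     unet=True)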
Example #2
    def __init__(self, image_shape, numClasses, pretrained_weights, palette, channels=None):
        """
        :param image_shape: input image shape
        :param numClasses: number of classes in segmentation network
        :param pretrained_weights: pretrained weights for generator model (Not Optional)
        :param palette: colour palette for interpreting probability maps
        :param channels: number of channels in image and ground truth label maps, default=[3, 3]
        """
        if channels is None:
            channels = [3, 3]

        if pretrained_weights is None:
            raise ValueError('The generator model must be pre-trained!!')

        self.numOfClasses = numClasses
        self.pretrained_weights = pretrained_weights
        self.palette = palette

        # Training Parameters for adversarial (From Journal)
        self.INIT_LR = 0.0000001
        self.weight = 2

        # Learning parameters for segmentor
        self.INIT_LR_Seg = 0.0001

        # model parameters
        self.seg_shape = (image_shape[0], image_shape[1], numClasses)
        self.input_shape = (image_shape[0], image_shape[1], channels[0])

        # optimizers
        opt_dis = Adam(lr=self.INIT_LR)
        opt_gen = Adam(lr=self.INIT_LR_Seg)

        # Build Generator
        self.generator = UNET.build(self.input_shape, self.numOfClasses, pretrained_weights=pretrained_weights)
        self.generator.summary()

        # Build and Compile Discriminator
        self.discriminator = Stanford_Adversarial.build(self.input_shape, self.seg_shape)
        self.discriminator.trainable = True
        self.discriminator.compile(optimizer=opt_dis, loss='binary_crossentropy', metrics=['acc'])
        self.discriminator.summary()

        # Define Composite (Segmentor -> Adversarial) Model
        self.composite = self._define_composite(self.INIT_LR_Seg)

        # Compile Segmentor
        self.generator.trainable = True
        self.generator.compile(optimizer=opt_gen, loss='categorical_crossentropy', metrics=['acc'])
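The constructor above relies on a _define_composite helper that is not shown. A minimal sketch of what it could look like, assuming the usual GAN-style stacking in which the discriminator (taking the image and the label map as two inputs) is frozen and driven by the generator's output; this is an assumption, not the author's implementation:

    # hypothetical sketch of _define_composite -- not the original code
    def _define_composite(self, learning_rate):
        from keras.models import Model
        from keras.optimizers import Adam

        # freeze the discriminator so only the generator is updated through the stacked model
        self.discriminator.trainable = False

        seg_out = self.generator.output                                  # predicted label map
        adv_out = self.discriminator([self.generator.input, seg_out])    # real/fake score

        composite = Model(self.generator.input, [seg_out, adv_out])
        # weighted sum of segmentation and adversarial losses (self.weight from the constructor)
        composite.compile(optimizer=Adam(lr=learning_rate),
                          loss=['categorical_crossentropy', 'binary_crossentropy'],
                          loss_weights=[1, self.weight])
        return composite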
Example #3
    def __init__(self, image_shape, numClasses, palette, train_mask_path, channels=None, classifier_weights=None,
                 segmentor_weights=None):
        if channels is None:
            channels = [3, 3]

        self.palette = palette
        self.image_shape = image_shape
        self.numOfClasses = numClasses
        self.classifier_weights = classifier_weights
        self.segmentor_weights = segmentor_weights

        if self.classifier_weights is None:
            raise ValueError("Classifier needs to be pretrained!!!")

        # End to End Training Parameters
        self.INIT_LR = 0.00001
        self.weight = 4
        self.training_weights = np.array([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0,
                                          1, 1, 0, 0, 0, 0, 0])

        # UNET Shape Parameter
        self.segmentor_input_shape = (self.image_shape[0], self.image_shape[1], channels[0])
        self.seg_shape = (self.image_shape[0], self.image_shape[1], self.numOfClasses)

        # ResNet50 Shape Parameter
        self.classifier_input_shape = (self.image_shape[0], self.image_shape[1], self.numOfClasses)

        # get Prior probability
        self.p_a_ag = self._get_prior(mask_path=train_mask_path)

        # Build Global Context Classifier
        ResNet = ResNet50()
        self.classifier = ResNet.build(self.classifier_input_shape, self.numOfClasses, self.classifier_weights)
        self.classifier.summary()

        # Build Segmentor
        self.segmentor = UNET.build(self.segmentor_input_shape, self.numOfClasses, self.segmentor_weights)
        self.segmentor.summary()

        # Define Composite
        self.composite = self._define_composite(learning_rate=self.INIT_LR)

        # Compile Segmentor
        self.segmentor_LR = 0.001
        opt = Adam(lr=self.segmentor_LR)
        self.segmentor.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['acc'])
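This constructor also calls a _get_prior helper (not shown) to estimate class prior probabilities from the training masks. A minimal sketch, assuming the masks are colour-coded images whose pixels can be matched against self.palette and that os, cv2 and numpy are imported at module level (as in the later examples); the implementation below is an assumption, not the source:

    # hypothetical sketch of _get_prior -- assumes RGB masks colour-coded with self.palette
    def _get_prior(self, mask_path):
        counts = np.zeros(self.numOfClasses, dtype=np.float64)
        for fname in os.listdir(mask_path):
            mask = cv2.cvtColor(cv2.imread(os.path.join(mask_path, fname)), cv2.COLOR_BGR2RGB)
            for ind, colour in enumerate(self.palette):
                counts[ind] += np.sum(np.all(mask == colour, axis=-1))

        # per-class pixel frequency over the whole training set
        return counts / counts.sum()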
Example #4
train_set = Dataloader(image_paths=train_frame_path,
                       mask_paths=train_mask_path,
                       image_size=input_size,
                       numclasses=num_of_Classes,
                       channels=[3, 3],
                       palette=palette,
                       seed=47)

val_set = Dataloader(image_paths=val_frame_path,
                     mask_paths=val_mask_path,
                     image_size=input_size,
                     numclasses=num_of_Classes,
                     channels=[3, 3],
                     palette=palette,
                     seed=47)

# build model
model = UNET.build((640, 640, 3),
                   num_of_Classes,
                   pretrained_weights='weights.h5')
model.summary()

# learning parameters
BS = 2
INIT_LR = 0.00001
EPOCHS = 30

# initialize data generators
traingen = train_set.data_gen(should_augment=True, batch_size=BS)
valgen = val_set.data_gen(should_augment=False, batch_size=BS)

# initialise variables
No_of_train_images = len(os.listdir(dataset_path + '/train_frames/train'))
No_of_val_images = len(os.listdir(dataset_path + '/val_frames/val'))
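The script stops after counting the images; the training call itself is not shown. A minimal continuation sketch, mirroring the compile settings used elsewhere on this page (the optimizer import and the save path are assumptions):

# hypothetical continuation -- compile and train with the generators defined above
from keras.optimizers import Adam

model.compile(optimizer=Adam(lr=INIT_LR), loss='categorical_crossentropy', metrics=['acc'])

model.fit_generator(traingen,
                    steps_per_epoch=No_of_train_images // BS,
                    validation_data=valgen,
                    validation_steps=No_of_val_images // BS,
                    epochs=EPOCHS)

model.save_weights('UNET_weights_final.h5')  # assumed output path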
Example #5
train_set = Dataloader(image_paths=train_frame_path,
                       mask_paths=train_mask_path,
                       image_size=input_size,
                       numclasses=num_of_Classes,
                       channels=[3, 3],
                       palette=palette,
                       seed=47)

val_set = Dataloader(image_paths=val_frame_path,
                     mask_paths=val_mask_path,
                     image_size=input_size,
                     numclasses=num_of_Classes,
                     channels=[3, 3],
                     palette=palette,
                     seed=47)

# build model
model = UNET.build((352, 352, 3),
                   num_of_Classes,
                   pretrained_weights='UNET_weights.h5')
model.summary()

# learning parameters
BS = 2
INIT_LR = 0.0001
EPOCHS = 100

# initialize data generators
traingen = train_set.data_gen(should_augment=True, batch_size=BS)
valgen = val_set.data_gen(should_augment=False, batch_size=BS)

# initialise variables
No_of_train_images = len(os.listdir(dataset_path + '/train_frames/train'))
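This second training script is cut off at the same point and is also missing the validation-image count. One plausible way to finish it, this time with checkpointing so the best validation weights are kept (the callbacks, monitor and file names are assumptions):

# hypothetical continuation -- checkpointed training; callbacks and file names are assumed
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam

No_of_val_images = len(os.listdir(dataset_path + '/val_frames/val'))

model.compile(optimizer=Adam(lr=INIT_LR), loss='categorical_crossentropy', metrics=['acc'])

callbacks = [ModelCheckpoint('UNET_weights_best.h5', monitor='val_loss', save_best_only=True),
             EarlyStopping(monitor='val_loss', patience=10)]

model.fit_generator(traingen,
                    steps_per_epoch=No_of_train_images // BS,
                    validation_data=valgen,
                    validation_steps=No_of_val_images // BS,
                    epochs=EPOCHS,
                    callbacks=callbacks)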
Example #6
image = cv2.imread(args["image"])
output = image.copy()

# import colour palette
df = pd.read_csv('classes.csv', sep=",", header=None)
palette = np.array(df.values, dtype=np.float32)

# cv2.resize expects dsize as (width, height)
image = cv2.resize(image, (args["width"], args["height"]))
image = image.astype("float32") / 255

# add batch dimension
image = image.reshape((1, image.shape[0], image.shape[1], 3))

# load model
print("[INFO] Loading Model............")
model = UNET.build((512, 512, 3), 29, pretrained_weights='AdvUNET.h5')
# model = load_model(args["model"], custom_objects={'dice_coef': dice_coef, 'mean_iou': mean_iou,
#                                                   'mean_iou_2': mean_iou_2},)

# make a prediction on the image
preds = model.predict(image)

# collapse class probabilities to label map
preds = preds.reshape((preds.shape[1], preds.shape[2], preds.shape[3]))
preds = preds.argmax(axis=-1)

label_map = np.zeros((preds.shape[0], preds.shape[1], 3)).astype('float32')

for ind in range(0, len(palette)):
    submat = np.where(preds == ind)
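    # hypothetical completion (not in the original snippet): paint the pixels of
    # class `ind` with its palette colour
    label_map[submat] = palette[ind]

# hypothetical output step -- the file name and the RGB->BGR conversion for OpenCV are assumed
cv2.imwrite('prediction.png', cv2.cvtColor(label_map.astype('uint8'), cv2.COLOR_RGB2BGR))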