Example 1
0
    def basic_model_properties(self, cf, variable_input_size):
        """Work out (in_shape, loss, metrics) for the configured task.

        The input shape follows the backend dim ordering: channels first
        for Theano ('th'), channels last otherwise.  For segmentation,
        ``variable_input_size`` leaves the spatial dims as ``None`` so the
        net accepts any image size.

        Raises:
            NotImplementedError: unknown detection model.
            ValueError: unknown problem type.
        """
        mode = cf.dataset.class_mode

        if mode == 'categorical':
            if K.image_dim_ordering() == 'th':
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0],
                            cf.target_size_train[1])
            else:
                in_shape = (cf.target_size_train[0],
                            cf.target_size_train[1],
                            cf.dataset.n_channels)
            return in_shape, 'categorical_crossentropy', ['accuracy']

        if mode == 'detection':
            # Each detection net carries its own loss/metric objects.
            if cf.model_name in ['yolo', 'tiny-yolo']:
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0],
                            cf.target_size_train[1])
                yolo_args = (in_shape, cf.dataset.n_classes, cf.dataset.priors)
                return in_shape, YOLOLoss(*yolo_args), [YOLOMetrics(*yolo_args)]
            if cf.model_name == 'ssd300':
                in_shape = (cf.target_size_train[0],
                            cf.target_size_train[1],
                            cf.dataset.n_channels)
                multibox = MultiboxLoss(cf.dataset.n_classes, neg_pos_ratio=2.0)
                # TODO: Add metrics for SSD
                return in_shape, multibox.compute_loss, None
            raise NotImplementedError

        if mode == 'segmentation':
            if variable_input_size:
                spatial = (None, None)
            else:
                spatial = (cf.target_size_train[0], cf.target_size_train[1])
            if K.image_dim_ordering() == 'th':
                in_shape = (cf.dataset.n_channels,) + spatial
            else:
                in_shape = spatial + (cf.dataset.n_channels,)
            loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
            metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
            return in_shape, loss, metrics

        raise ValueError('Unknown problem type')
Example 2
0
    def basic_model_properties(self, cf, variable_input_size):
        """Define the model input shape, loss and metrics.

        Args:
            cf: experiment configuration (dataset info, model name,
                training target size).
            variable_input_size: segmentation only — if True the spatial
                dimensions of the input are left as None.

        Returns:
            Tuple (in_shape, loss, metrics).

        Raises:
            ValueError: unknown problem type or unknown detection model.
        """
        if cf.dataset.class_mode == 'categorical':
            if K.image_dim_ordering() == 'th':
                in_shape = (cf.dataset.n_channels, cf.target_size_train[0],
                            cf.target_size_train[1])
            else:
                in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                            cf.dataset.n_channels)
            loss = 'categorical_crossentropy'
            metrics = ['accuracy']
        elif cf.dataset.class_mode == 'detection':

            # Different detection nets may have different losses/metrics.
            if cf.model_name in ['yolo', 'tiny-yolo']:
                in_shape = (cf.dataset.n_channels, cf.target_size_train[0],
                            cf.target_size_train[1])
                loss = YOLOLoss(in_shape, cf.dataset.n_classes,
                                cf.dataset.priors)
                metrics = [
                    YOLOMetrics(in_shape, cf.dataset.n_classes,
                                cf.dataset.priors)
                ]
            elif cf.model_name in [
                    'ssd300', 'ssd300_pretrained', 'ssd_resnet50'
            ]:
                # TODO: in_shape ok for ssd?
                in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                            cf.dataset.n_channels)

                # n_classes + 1: background gets label id 0.
                # TODO: extract config parameters from MultiboxLoss
                mboxloss = MultiboxLoss(cf.dataset.n_classes + 1,
                                        alpha=1.0,
                                        neg_pos_ratio=2.0,
                                        background_label_id=0,
                                        negatives_for_hard=100.0)
                loss = mboxloss.compute_loss
                metrics = []  # TODO: add mAP metric
            else:
                # Fix: previously there was no else-branch, so an unknown
                # detection model crashed with an UnboundLocalError at the
                # return statement below.
                raise ValueError('Unknown detection model: ' + cf.model_name)

        elif cf.dataset.class_mode == 'segmentation':
            if K.image_dim_ordering() == 'th':
                if variable_input_size:
                    in_shape = (cf.dataset.n_channels, None, None)
                else:
                    in_shape = (cf.dataset.n_channels, cf.target_size_train[0],
                                cf.target_size_train[1])
            else:
                if variable_input_size:
                    in_shape = (None, None, cf.dataset.n_channels)
                else:
                    in_shape = (cf.target_size_train[0],
                                cf.target_size_train[1], cf.dataset.n_channels)
            loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
            metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
        else:
            raise ValueError('Unknown problem type')
        return in_shape, loss, metrics
Example 3
0
 def basic_model_properties(self, cf, variable_input_size):
     """Return (in_shape, loss, metrics) matching cf's problem type.

     The input layout follows the backend dim ordering ('th' puts the
     channel axis first).  For segmentation, variable_input_size leaves
     the spatial dims as None so any image size can be fed.

     Raises:
         ValueError: unknown problem type or unknown detection model.
     """
     rows = cf.target_size_train[0]
     cols = cf.target_size_train[1]
     chans = cf.dataset.n_channels
     mode = cf.dataset.class_mode

     if mode == 'categorical':
         if K.image_dim_ordering() == 'th':
             in_shape = (chans, rows, cols)
         else:
             in_shape = (rows, cols, chans)
         return in_shape, 'categorical_crossentropy', ['accuracy']

     if mode == 'detection':
         if cf.model_name in ['yolo', 'tiny-yolo']:
             in_shape = (chans, rows, cols)
             loss = YOLOLoss(in_shape, cf.dataset.n_classes,
                             cf.dataset.priors)
             metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                    cf.dataset.priors)]
         elif cf.model_name == 'ssd':
             if K.image_dim_ordering() == 'th':
                 in_shape = (chans, rows, cols)
             else:
                 in_shape = (rows, cols, chans)
             # n_classes + 1 to include the background class.
             loss = SSDLoss(in_shape, cf.dataset.n_classes + 1,
                            cf.dataset.priors)
             # TODO: SSDMetrics not wired up yet.
             metrics = []
         else:
             raise ValueError('Unknown model')
         return in_shape, loss, metrics

     if mode == 'segmentation':
         if variable_input_size:
             spatial = (None, None)
         else:
             spatial = (rows, cols)
         if K.image_dim_ordering() == 'th':
             in_shape = (chans,) + spatial
         else:
             in_shape = spatial + (chans,)
         loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
         metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
         return in_shape, loss, metrics

     raise ValueError('Unknown problem type')
Example 4
0
 def basic_model_properties(self, cf, variable_input_size):
     """Define the model input size, loss and metrics.

     Args:
         cf: experiment configuration (dataset info, model name, target
             training size).
         variable_input_size: segmentation only — leave the spatial input
             dimensions as None so any image size is accepted.

     Returns:
         Tuple (in_shape, loss, metrics).

     Raises:
         ValueError: unknown problem type or unknown detection model.
     """
     if cf.dataset.class_mode == 'categorical':
         if K.image_dim_ordering() == 'th':
             in_shape = (cf.dataset.n_channels,
                         cf.target_size_train[0],
                         cf.target_size_train[1])
         else:
             in_shape = (cf.target_size_train[0],
                         cf.target_size_train[1],
                         cf.dataset.n_channels)
         loss = 'categorical_crossentropy'
         metrics = ['accuracy']
     elif cf.dataset.class_mode == 'detection':
         # TODO detection: check model, different detection nets may have
         # different losses and metrics.
         if "yolo" in cf.model_name:
             in_shape = (cf.dataset.n_channels,
                         cf.target_size_train[0],
                         cf.target_size_train[1])
             loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
             metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                    cf.dataset.priors)]
         elif "ssd" in cf.model_name:
             in_shape = (cf.target_size_train[0],
                         cf.target_size_train[1],
                         cf.dataset.n_channels)
             loss = SSDLoss(cf.dataset.n_classes)
             metrics = [SSDMetrics()]
         else:
             # Fix: previously an unknown detection model fell through and
             # crashed with an UnboundLocalError at the return statement.
             raise ValueError('Unknown detection model: ' + cf.model_name)
     elif cf.dataset.class_mode == 'segmentation':
         if K.image_dim_ordering() == 'th':
             if variable_input_size:
                 in_shape = (cf.dataset.n_channels, None, None)
             else:
                 in_shape = (cf.dataset.n_channels,
                             cf.target_size_train[0],
                             cf.target_size_train[1])
         else:
             if variable_input_size:
                 in_shape = (None, None, cf.dataset.n_channels)
             else:
                 in_shape = (cf.target_size_train[0],
                             cf.target_size_train[1],
                             cf.dataset.n_channels)
         loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
         metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
     else:
         raise ValueError('Unknown problem type')
     return in_shape, loss, metrics
Example 5
0
 def basic_model_properties(self, cf, variable_input_size):
     """Choose the network input shape plus training loss and metrics.

     Detection treats 'ssd' specially (Multibox loss, channels-last
     input); every other detection model name is handled as YOLO
     (channels-first input).  Other modes follow the backend dim
     ordering ('th' = channels first).
     """
     mode = cf.dataset.class_mode

     if mode == 'categorical':
         hw = (cf.target_size_train[0], cf.target_size_train[1])
         if K.image_dim_ordering() == 'th':
             in_shape = (cf.dataset.n_channels,) + hw
         else:
             in_shape = hw + (cf.dataset.n_channels,)
         loss, metrics = 'categorical_crossentropy', ['accuracy']
     elif mode == 'detection':
         hw = (cf.target_size_train[0], cf.target_size_train[1])
         if cf.model_name == 'ssd':
             in_shape = hw + (cf.dataset.n_channels,)
             loss = MultiboxLoss(cf.dataset.n_classes,
                                 neg_pos_ratio=2.0).compute_loss
             metrics = None
         else:  # any non-ssd detector is treated as YOLO
             in_shape = (cf.dataset.n_channels,) + hw
             loss = YOLOLoss(in_shape, cf.dataset.n_classes,
                             cf.dataset.priors)
             metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                    cf.dataset.priors)]
     elif mode == 'segmentation':
         if variable_input_size:
             hw = (None, None)
         else:
             hw = (cf.target_size_train[0], cf.target_size_train[1])
         if K.image_dim_ordering() == 'th':
             in_shape = (cf.dataset.n_channels,) + hw
         else:
             in_shape = hw + (cf.dataset.n_channels,)
         loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
         metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
     else:
         raise ValueError('Unknown problem type')
     return in_shape, loss, metrics
Example 6
0
    def make_segmentor(self):
        """Build and compile the SegNet-based segmentor.

        Uses an RMSprop optimizer with gradient clipping, the
        void-aware flattened cross-entropy loss and an IoU metric.
        """
        net = build_segnet(self.img_shape,
                           self.n_classes,
                           l2_reg=0.,
                           init='glorot_uniform',
                           path_weights=None,
                           freeze_layers_from=None,
                           use_unpool=False,
                           basic=False)
        learning_rate = 1e-04
        opt = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-8, clipnorm=10)
        print(
            '   Optimizer segmentor: rmsprop. Lr: {}. Rho: 0.9, epsilon=1e-8, '
            'clipnorm=10'.format(learning_rate))
        seg_loss = cce_flatt(self.cf.dataset.void_class,
                             self.cf.dataset.cb_weights)
        seg_metrics = [IoU(self.cf.dataset.n_classes,
                           self.cf.dataset.void_class)]

        net.compile(loss=seg_loss, metrics=seg_metrics, optimizer=opt)
        return net
Example 7
0
def train(dataset, model_name, learning_rate, weight_decay,
          num_epochs, max_patience, batch_size, optimizer,
          savepath, train_path, valid_path, test_path,
          crop_size=(224, 224), in_shape=(3, None, None),
          n_classes=5, gtSet=None, void_class=None, w_balance=None,
          weights_file=False, show_model=False,
          plot_hist=True, train_model=True):
    """Train and evaluate a semantic-segmentation model on a polyp dataset.

    Args:
        dataset: dataset identifier (informational).
        model_name: network to build; only 'fcn8' is implemented.
        learning_rate: optimizer learning rate.
        weight_decay: L2 regularization passed to the model builder.
        num_epochs: maximum number of training epochs.
        max_patience: early-stopping patience in epochs.
        batch_size: training batch size.
        optimizer: optimizer name; only 'rmsprop' is implemented.
        savepath: output directory prefix for weights/history/images.
        train_path, valid_path, test_path: split directories, each with
            an 'images' folder and a masks folder.
        crop_size: training crop size.
        in_shape: model input shape (channels first).
        n_classes: total number of classes, void classes included.
        gtSet: optional suffix selecting an alternative 'masks<gtSet>'
            folder (different polyp groundtruths).
        void_class: list of void label ids; defaults to [4].
        w_balance: class-balancing method name, or None to disable.
        weights_file: optional initial weights for the model.
        show_model: print the model summary and save a plot of it.
        plot_hist: plot the training history after testing.
        train_model: actually fit the model (otherwise only evaluate).

    Raises:
        ValueError: for an unknown model_name or optimizer.
    """
    # Fix: avoid a mutable default argument; None stands for the old [4].
    if void_class is None:
        void_class = [4]

    # Remove void classes from number of classes
    n_classes = n_classes - len(void_class)

    # Mask folder (for different polyp groundtruths)
    if gtSet is not None:
        mask_folder = 'masks' + str(gtSet)
    else:
        mask_folder = 'masks'

    # TODO: Get the number of images directly from data loader
    n_images_train = 30  # 547
    n_images_val = 20  # 183
    n_images_test = 20  # 182

    # Normalization mean and std computed on training set for RGB pixel values
    print('\n > Computing mean and std for normalization...')
    # NOTE(review): dataset statistics are deliberately disabled (the old
    # code used `if False:`); plain 1/255 rescaling is used instead.
    # Flip this toggle to re-enable them.
    use_dataset_stats = False
    if use_dataset_stats:
        rgb_mean, rgb_std = compute_mean_std(os.path.join(train_path, 'images'),
                                             os.path.join(train_path,
                                                          mask_folder),
                                             n_classes)
        rescale = None
    else:
        rgb_mean = None
        rgb_std = None
        rescale = 1/255.
    print('Mean: ' + str(rgb_mean))
    print('Std: ' + str(rgb_std))

    # Compute class balance weights
    if w_balance is not None:
        class_balance_weights = compute_class_balance(
            masks_path=train_path + mask_folder,
            n_classes=n_classes,
            method=w_balance,
            void_labels=void_class)
        print('Class balance weights: ' + str(class_balance_weights))
    else:
        class_balance_weights = None

    # Build model
    print('\n > Building model (' + model_name + ')...')
    if model_name == 'fcn8':
        model = build_fcn8(in_shape, l2_reg=weight_decay, nclasses=n_classes,
                           weights_file=weights_file, deconv='deconv')
        # Fix: removed a stray no-op `model.output` expression statement.
    else:
        raise ValueError('Unknown model')

    # Create the optimizer
    print('\n > Creating optimizer ({}) with lr ({})...'.format(optimizer,
                                                                learning_rate))
    if optimizer == 'rmsprop':
        opt = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-8, clipnorm=10)
    else:
        raise ValueError('Unknown optimizer')

    # Compile model with the void-aware flattened cross-entropy loss
    print('\n > Compiling model...')
    model.compile(loss=cce_flatt(void_class, class_balance_weights),
                  optimizer=opt)

    # Show model structure
    if show_model:
        model.summary()
        plot(model, to_file=savepath + 'model.png')

    # Create the data generators
    print('\n > Reading training set...')
    dg_tr = ImageDataGenerator(crop_size=crop_size,  # Crop the image to a fixed size
                               featurewise_center=False,  # Substract mean - dataset
                               samplewise_center=False,  # Substract mean - sample
                               featurewise_std_normalization=False,  # Divide std - dataset
                               samplewise_std_normalization=False,  # Divide std - sample
                               rgb_mean=rgb_mean,
                               rgb_std=rgb_std,
                               gcn=False,  # Global contrast normalization
                               zca_whitening=False,  # Apply ZCA whitening
                               rotation_range=180,  # Rnd rotation degrees 0-180
                               width_shift_range=0.0,  # Rnd horizontal shift
                               height_shift_range=0.0,  # Rnd vertical shift
                               shear_range=0.5,  # 0.5,  # Shear in radians
                               zoom_range=0.1,  # Zoom
                               channel_shift_range=0.,  # Channel shifts
                               fill_mode='constant',  # Fill mode
                               cval=0.,  # Void image value
                               void_label=void_class[0],  # Void class value
                               horizontal_flip=True,  # Rnd horizontal flip
                               vertical_flip=True,  # Rnd vertical flip
                               rescale=rescale,  # Rescaling factor
                               spline_warp=False,  # Enable elastic deformation
                               warp_sigma=10,  # Elastic deformation sigma
                               warp_grid_size=3  # Elastic deformation gridSize
                               )
    train_gen = dg_tr.flow_from_directory(train_path + 'images',
                                          batch_size=batch_size,
                                          gt_directory=train_path + mask_folder,
                                          target_size=crop_size,
                                          class_mode='seg_map',
                                          classes=n_classes,
                                          # save_to_dir=savepath,  # Save DA
                                          save_prefix='data_augmentation',
                                          save_format='png')

    print('\n > Reading validation set...')
    dg_va = ImageDataGenerator(rgb_mean=rgb_mean, rgb_std=rgb_std,
                               rescale=rescale)
    valid_gen = dg_va.flow_from_directory(valid_path + 'images',
                                          batch_size=1,
                                          gt_directory=valid_path + mask_folder,
                                          target_size=None,
                                          class_mode='seg_map',
                                          classes=n_classes)

    print('\n > Reading testing set...')
    dg_ts = ImageDataGenerator(rgb_mean=rgb_mean, rgb_std=rgb_std,
                               rescale=rescale)
    test_gen = dg_ts.flow_from_directory(test_path + 'images',
                                         batch_size=1,
                                         gt_directory=test_path + mask_folder,
                                         target_size=None,
                                         class_mode='seg_map',
                                         classes=n_classes,
                                         shuffle=False)

    # Define the jaccard validation callback
    eval_model = Evaluate_model(n_classes=n_classes,
                                void_label=void_class[0],
                                save_path=savepath,
                                valid_gen=valid_gen,
                                valid_epoch_length=n_images_val,
                                valid_metrics=['val_loss',
                                               'val_jaccard',
                                               'val_acc',
                                               'val_jaccard_perclass'])

    # Define early stopping callbacks
    # NOTE(review): early_stop_jac_class is built but never added to `cb`
    # below — confirm whether per-class early stopping was intended.
    early_stop_jac = EarlyStopping(monitor='val_jaccard', mode='max',
                                   patience=max_patience, verbose=0)
    early_stop_jac_class = []
    for i in range(n_classes):
        early_stop_jac_class += [EarlyStopping(monitor=str(i)+'_val_jacc_percl',
                                               mode='max',
                                               patience=max_patience,
                                               verbose=0)]

    # Define model saving callbacks (best overall jaccard + best per class)
    checkp_jac = ModelCheckpoint(filepath=savepath+"weights.hdf5",
                                 verbose=0, monitor='val_jaccard',
                                 mode='max', save_best_only=True,
                                 save_weights_only=True)
    checkp_jac_class = []
    for i in range(n_classes):
        checkp_jac_class += [ModelCheckpoint(filepath=savepath+"weights"+str(i)+".hdf5",
                                             verbose=0,
                                             monitor=str(i)+'_val_jacc_percl',
                                             mode='max', save_best_only=True,
                                             save_weights_only=True)]

    # Train the model
    if train_model:
        print('\n > Training the model...')
        cb = [eval_model, early_stop_jac, checkp_jac] + checkp_jac_class
        hist = model.fit_generator(train_gen,
                                   samples_per_epoch=n_images_train,
                                   nb_epoch=num_epochs,
                                   callbacks=cb)

    # Compute test metrics using the best (highest val_jaccard) weights
    print('\n > Testing the model...')
    model.load_weights(savepath + "weights.hdf5")
    color_map = [
        (255/255., 0, 0),                   # Background
        (192/255., 192/255., 128/255.),     # Polyp
        (128/255., 64/255., 128/255.),      # Lumen
        (0, 0, 255/255.),                   # Specularity
        (0, 255/255., 0),         #
        (192/255., 128/255., 128/255.),     #
        (64/255., 64/255., 128/255.),       #
    ]
    test_metrics = compute_metrics(model, test_gen, n_images_test, n_classes,
                                   metrics=['test_loss',
                                            'test_jaccard',
                                            'test_acc',
                                            'test_jaccard_perclass'],
                                   color_map=color_map, tag="test",
                                   void_label=void_class[0],
                                   out_images_folder=savepath,
                                   epoch=0,
                                   save_all_images=True,
                                   useCRF=False)
    for k in sorted(test_metrics.keys()):
        print('{}: {}'.format(k, test_metrics[k]))

    if train_model:
        # Save the results.
        # Fix: pickle requires binary file modes ('wb'/'rb') under Python 3.
        print("\n > Saving history...")
        with open(savepath + "history.pickle", 'wb') as f:
            pickle.dump([hist.history, test_metrics], f)

        # Load the results back (round-trip sanity check)
        print("\n > Loading history...")
        with open(savepath + "history.pickle", 'rb') as f:
            history, test_metrics = pickle.load(f)

        # Show the trained model history
        if plot_hist:
            print('\n > Show the trained model history...')
            plot_history(history, savepath, n_classes)
Example 8
0
        # Standardize both splits with training-set statistics (test data
        # must not contribute to the mean/std); 1e-7 guards against a
        # zero std.
        x_train -= x_train_mean
        x_test -= x_train_mean
        x_train /= (x_train_std + 1e-7)
        x_test /= (x_train_std + 1e-7)

    # plot data
    # NOTE(review): assumes x_train/y_train are 4-D (N, H, W, C) image
    # tensors — confirm against the data loader above this fragment.
    if(not args.nodisplay):
        for idx in range(25):
            # Image and its ground-truth mask side by side on a 5x10 grid.
            plt.subplot(5,10,2*idx+1)
            plt.imshow(x_train[idx,:,:,0])
            plt.subplot(5,10,2*idx+2)
            plt.imshow(y_train[idx,:,:,0])
        plt.show()

    if(not args.nomodel):
        # Void-aware flattened cross-entropy; class-balance weights are
        # deliberately disabled (None) here.
        loss = cce_flatt(void_class, None)
        metrics = [IoU(n_classes, void_class)]
        #opt = RMSprop(lr=0.001, clipnorm=10)
        opt = Nadam(lr=0.002)

        model = build_fcn8(in_shape, n_classes, 0.)
        model.compile(loss=loss, metrics=metrics, optimizer=opt)

        # Stop early once val_loss improves by less than 1e-4 for 2 epochs.
        cb = [EarlyStopping(monitor='val_loss', min_delta = 0.0001, patience=2)]
        model.fit(x_train, y_train, epochs=1000, batch_size=16, callbacks=cb, validation_data=(x_valid,y_valid))

        score = model.evaluate(x_test, y_test) #, batch_size=128)
        y_pred = model.predict(x_test)

        print(score)
Example 9
0
    def basic_model_properties(self, cf, variable_input_size):
        """Define the model input size, loss and metrics.

        Args:
            cf: experiment configuration (dataset info, model name,
                training target size).
            variable_input_size: segmentation only — leave the spatial
                input dimensions as None so any image size is accepted.

        Returns:
            Tuple (in_shape, loss, metrics).

        Raises:
            ValueError: for an unknown problem type or detection model.
        """
        if cf.dataset.class_mode == 'categorical':
            if K.image_dim_ordering() == 'th':
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0],
                            cf.target_size_train[1])

            else:
                in_shape = (cf.target_size_train[0],
                            cf.target_size_train[1],
                            cf.dataset.n_channels)

            loss = 'categorical_crossentropy'
            metrics = ['accuracy']

        elif cf.dataset.class_mode == 'detection':
            if 'yolo' in cf.model_name:
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0],
                            cf.target_size_train[1])

                loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
                metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes, cf.dataset.priors)]

            elif cf.model_name == 'ssd':
                in_shape = (cf.target_size_train[0],
                            cf.target_size_train[1],
                            cf.dataset.n_channels)
                loss = MultiboxLoss(cf.dataset.n_classes, neg_pos_ratio=2.0).compute_loss
                metrics = []  # TODO: There is no metrics for the ssd model

            else:
                # Fix: corrected the 'Uknown' typo and the missing space at
                # the implicit string concatenation ("type.Only").
                raise ValueError('Unknown "' + cf.model_name + '" name for the '
                                 + cf.dataset.class_mode + ' problem type. '
                                 'Only implemented for: {yolo, tiny-yolo, ssd}')

        elif cf.dataset.class_mode == 'segmentation':
            if K.image_dim_ordering() == 'th':
                if variable_input_size:
                    in_shape = (cf.dataset.n_channels, None, None)
                else:
                    in_shape = (cf.dataset.n_channels,
                                cf.target_size_train[0],
                                cf.target_size_train[1])

            else:
                if variable_input_size:
                    in_shape = (None, None, cf.dataset.n_channels)
                else:
                    in_shape = (cf.target_size_train[0],
                                cf.target_size_train[1],
                                cf.dataset.n_channels)

            loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
            metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]

        else:
            raise ValueError('Unknown problem type')

        return in_shape, loss, metrics