Example #1
    def __init__(self):

        batch_size = 32
        self.image_size = (300, 300, 3)
        n_classes = 80
        mode = 'inference_fast'
        l2_regularization = 0.0005
        min_scale = 0.1  # None
        max_scale = 0.9  # None
        scales = None  # [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
        aspect_ratios_global = None
        aspect_ratios_per_layer = [[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0], [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                                   [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0], [1.0, 2.0, 0.5], [1.0, 2.0, 0.5]]
        two_boxes_for_ar1 = True
        steps = None  # [8, 16, 32, 64, 100, 300]
        offsets = None  # [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
        clip_boxes = True
        variances = [0.1, 0.1, 0.2, 0.2]
        coords = 'centroids'
        normalize_coords = True
        subtract_mean = [123, 117, 104]
        divide_by_stddev = 128
        swap_channels = None
        confidence_thresh = 0.7  # return only detections scoring above this
        iou_threshold = 0.45  # NMS overlap threshold
        top_k = 1  # keep only the single highest-scoring detection per image
        nms_max_output_size = 400  # maximum boxes kept by NMS
        return_predictor_sizes = False

        model = mobilenet_v2_ssd(self.image_size, n_classes, mode, l2_regularization, min_scale, max_scale, scales,
                                 aspect_ratios_global, aspect_ratios_per_layer, two_boxes_for_ar1, steps,
                                 offsets, clip_boxes, variances, coords, normalize_coords, subtract_mean,
                                 divide_by_stddev, swap_channels, confidence_thresh, iou_threshold, top_k,
                                 nms_max_output_size, return_predictor_sizes)

        # 2: Load the trained weights into the model.
        weights_path = os.path.join("pretrained_weights", "ssdlite_coco_loss-4.8205_val_loss-4.1873.h5")
        model.load_weights(weights_path, by_name=True)
        # 3: Compile the model so that Keras won't complain the next time you load it.
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
        model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
        self.model = model
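
In 'inference_fast' mode the decoding step is part of the model graph, so predict() directly returns, per image, up to top_k rows of the form [class_id, confidence, xmin, ymin, xmax, ymax]. A minimal usage sketch (the Detector wrapper name and the test image are hypothetical; it assumes mean subtraction and scaling are baked into the graph via subtract_mean/divide_by_stddev, as in the ssd_keras model builders):

import numpy as np
from keras.preprocessing import image

detector = Detector()  # hypothetical class containing the __init__ above
img = image.load_img("test.jpg", target_size=detector.image_size[:2])
x = np.expand_dims(image.img_to_array(img), axis=0)  # shape (1, 300, 300, 3)

y_pred = detector.model.predict(x)
# Rows may be zero-padded if fewer than top_k detections clear the threshold.
for class_id, confidence, xmin, ymin, xmax, ymax in y_pred[0]:
    print("class {:.0f} @ {:.2f}: ({:.0f}, {:.0f}, {:.0f}, {:.0f})".format(
        class_id, confidence, xmin, ymin, xmax, ymax))
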
Example #2
variances = [0.1, 0.1, 0.2, 0.2]
coords = 'centroids'
normalize_coords = True
subtract_mean = [123, 117, 104]
divide_by_stddev = 128
swap_channels = None
confidence_thresh = 0.01
iou_threshold = 0.45
top_k = 200
nms_max_output_size = 400
return_predictor_sizes = False

model = mobilenet_v2_ssd(image_size, n_classes, mode, l2_regularization,
                         min_scale, max_scale, scales, aspect_ratios_global,
                         aspect_ratios_per_layer, two_boxes_for_ar1, steps,
                         offsets, clip_boxes, variances, coords,
                         normalize_coords, subtract_mean, divide_by_stddev,
                         swap_channels, confidence_thresh, iou_threshold,
                         top_k, nms_max_output_size, return_predictor_sizes)

# 2: Load the trained weights into the model.
# weights_path = '../pretrained_weights/ssdlite_coco_loss-4.8205_val_loss-4.1873.h5'
weights_path = os.path.join("..", "pretrained_weights",
                            "ssdlite_coco_loss-4.8205_val_loss-4.1873.h5")

model.load_weights(weights_path, by_name=True)

# 3: Compile the model so that Keras won't complain the next time you load it.
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
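
Here confidence_thresh is 0.01 and top_k is 200, so the output is intentionally dense (useful for evaluation, e.g. COCO mAP). Assuming the model was built in 'inference' mode, where the output is already decoded into [class_id, confidence, xmin, ymin, xmax, ymax] rows, you would typically re-filter by a stricter score for display, as in the ssd_keras inference notebooks (batch_images and the 0.5 threshold are assumptions):

y_pred = model.predict(batch_images)
# Keep only detections whose confidence (column 1) clears the display threshold.
y_pred_thresh = [y[y[:, 1] > 0.5] for y in y_pred]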

Example #3

    def train(self):
        # build model (here the constructor takes a single training-config object;
        # image_size, n_classes, scales, etc. used below are assumed to be
        # module-level settings like those in Examples #1 and #2)
        model = mobilenet_v2_ssd(self.training_config)

        # load weights
        model.load_weights(self.weights_path, by_name=True)

        # compile the model
        adam = Adam(lr=0.001,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-08,
                    decay=0.0)
        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
        # set_trainable(r"(ssd\_[cls|box].*)", model)
        model.compile(optimizer=adam, loss=ssd_loss.compute_loss)

        model.summary()  # summary() prints directly and returns None, so print() is unnecessary

        # load data
        train_dataset = DataGenerator(load_images_into_memory=False,
                                      hdf5_dataset_path=None)
        val_dataset = DataGenerator(load_images_into_memory=False,
                                    hdf5_dataset_path=None)

        train_dataset.parse_json(
            images_dirs=[self.train_dir],
            annotations_filenames=[self.train_annotations],
            ground_truth_available=True,
            include_classes='all',
            ret=False)
        val_dataset.parse_json(images_dirs=[self.val_dir],
                               annotations_filenames=[self.val_annotations],
                               ground_truth_available=True,
                               include_classes='all',
                               ret=False)

        # The category maps translate between the contiguous class IDs the model
        # predicts and the original, non-contiguous COCO category IDs; the
        # `classes_to_cats` dictionary is needed to map predictions back to COCO.
        cats_to_classes, classes_to_cats, cats_to_names, classes_to_names = get_coco_category_maps(
            self.train_annotations)

        # set the image transformations for pre-processing and data augmentation options.
        # For the training generator:
        ssd_data_augmentation = SSDDataAugmentation(img_height=image_size[0],
                                                    img_width=image_size[1],
                                                    background=subtract_mean)

        # For the validation generator:
        convert_to_3_channels = ConvertTo3Channels()
        resize = Resize(height=image_size[0], width=image_size[1])

        # instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.

        # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
        predictor_sizes = [
            model.get_layer('ssd_cls1conv2_bn').output_shape[1:3],
            model.get_layer('ssd_cls2conv2_bn').output_shape[1:3],
            model.get_layer('ssd_cls3conv2_bn').output_shape[1:3],
            model.get_layer('ssd_cls4conv2_bn').output_shape[1:3],
            model.get_layer('ssd_cls5conv2_bn').output_shape[1:3],
            model.get_layer('ssd_cls6conv2_bn').output_shape[1:3]
        ]
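        # Each entry is the (height, width) grid of one predictor layer; every
        # grid cell generates one anchor per aspect ratio (plus one extra for
        # ratio 1.0 when two_boxes_for_ar1 is set), so these six shapes
        # determine the total number of anchor boxes.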

        ssd_input_encoder = SSDInputEncoder(
            img_height=image_size[0],
            img_width=image_size[1],
            n_classes=n_classes,
            predictor_sizes=predictor_sizes,
            scales=scales,
            aspect_ratios_per_layer=aspect_ratios_per_layer,
            two_boxes_for_ar1=two_boxes_for_ar1,
            steps=steps,
            offsets=offsets,
            clip_boxes=clip_boxes,
            variances=variances,
            matching_type='multi',
            pos_iou_threshold=0.5,
            neg_iou_limit=0.3,
            normalize_coords=normalize_coords)
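        # 'multi' matching assigns each ground-truth box to its single best
        # anchor and additionally to every anchor with IoU >= pos_iou_threshold;
        # unmatched anchors whose IoU still exceeds neg_iou_limit are marked
        # neutral, counting neither as positives nor as negatives.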

        # create the generator handles that will be passed to Keras' `fit_generator()` function.

        train_generator = train_dataset.generate(
            batch_size=batch_size,
            shuffle=True,
            transformations=[ssd_data_augmentation],
            label_encoder=ssd_input_encoder,
            returns={'processed_images', 'encoded_labels'},
            keep_images_without_gt=False)

        val_generator = val_dataset.generate(
            batch_size=batch_size,
            shuffle=False,
            transformations=[convert_to_3_channels, resize],
            label_encoder=ssd_input_encoder,
            returns={'processed_images', 'encoded_labels'},
            keep_images_without_gt=False)

        # Get the number of samples in the training and validations datasets.
        train_dataset_size = train_dataset.get_dataset_size()
        val_dataset_size = val_dataset.get_dataset_size()

        print("Number of images in the training dataset:\t{:>6}".format(
            train_dataset_size))
        print("Number of images in the validation dataset:\t{:>6}".format(
            val_dataset_size))

        # `lr_schedule` and `log_dir` are assumed to be defined elsewhere in the
        # module (a sketch of a typical `lr_schedule` follows this example).
        callbacks = [
            LearningRateScheduler(schedule=lr_schedule, verbose=1),
            TensorBoard(log_dir=log_dir,
                        histogram_freq=0,
                        write_graph=True,
                        write_images=False),
            ModelCheckpoint(os.path.join(log_dir,
                                         "ssdseg_coco_{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5"),
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            save_weights_only=True)
        ]

        model.fit_generator(train_generator,
                            epochs=1000,
                            steps_per_epoch=1000,
                            callbacks=callbacks,
                            validation_data=val_generator,
                            validation_steps=100,
                            initial_epoch=0)
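
The LearningRateScheduler callback above references lr_schedule, which this snippet never defines. A typical step schedule in the style of the ssd_keras training tutorials (the epoch boundaries and rates here are assumptions, not the author's values):

def lr_schedule(epoch):
    # Step decay: start at 1e-3, then drop by 10x twice.
    if epoch < 80:
        return 0.001
    elif epoch < 100:
        return 0.0001
    else:
        return 0.00001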