Example #1
validation_annotations_path = '../ConeData/annotations/validation'
test_annotations_path = '../ConeData/annotations/test'
'''
classes = ['background',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat',
           'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor']
'''
classes = ['background', 'Cone']

train_dataset.parse_xml(images_dirs=[train_images_path],
                        image_set_filenames=[train_setFileName],
                        annotations_dirs=[train_annotations_path],
                        classes=classes,
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False)

val_dataset.parse_xml(images_dirs=[validation_images_path],
                      image_set_filenames=[validation_setFileName],
                      annotations_dirs=[validation_annotations_path],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=True,
                      ret=False)

# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will
# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`
# option in the constructor, because in that case the images already reside in memory.
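
# A minimal sketch of that optional conversion (an addition, not part of the
# original example), using the same `create_hdf5_dataset` call that appears in
# the later examples; the .h5 file names here are hypothetical.
train_dataset.create_hdf5_dataset(file_path='cone_train.h5',
                                  resize=False,
                                  variable_image_size=True,
                                  verbose=True)
val_dataset.create_hdf5_dataset(file_path='cone_val.h5',
                                resize=False,
                                variable_image_size=True,
                                verbose=True)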
Example #2
def test_config(config):
    '''
    Test the given configuration; the configuration should already have been
    used for training (see ssd_train.py), otherwise this will raise an error.

    Arguments:
        config: the configuration of the model to use; it should already be
            loaded.

    '''
    local_dir = config.ROOT_FOLDER
    data_dir = config.DATA_DIR
    img_shape = config.IMG_SHAPE
    img_height = img_shape[0]  # Height of the model input images
    img_width = img_shape[1]  # Width of the model input images
    img_channels = img_shape[2]  # Number of color channels of the model input images
    n_classes = 20  # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
    normalize_coords = True

    K.clear_session()  # Clear previous models from memory.
    print("[INFO] loading model...")
    model_path = os.path.join(local_dir, 'models', config.MODEL_NAME)

    # We need to create an SSDLoss object in order to pass that to the model loader.
    ssd_loss = SSDLoss(neg_pos_ratio=3, n_neg_min=0, alpha=1.0)
    model = load_model(model_path,
                       custom_objects={
                           'AnchorBoxes': AnchorBoxes,
                           'L2Normalization': L2Normalization,
                           'DecodeDetections': DecodeDetections,
                           'compute_loss': ssd_loss.compute_loss
                       })
    classes = config.CLASSES
    dataset = DataGenerator(load_images_into_memory=False,
                            hdf5_dataset_path=None)
    dataset_images_dir = os.path.join(data_dir, 'Images')
    dataset_annotations_dir = os.path.join(data_dir, 'Annotations/')
    dataset_test_image_set_filename = os.path.join(data_dir, 'ImageSets',
                                                   'test.txt')

    dataset.parse_xml(images_dirs=[dataset_images_dir],
                      image_set_filenames=[dataset_test_image_set_filename],
                      annotations_dirs=[dataset_annotations_dir],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=True,
                      ret=False)
    dataset.create_hdf5_dataset(file_path=config.MODEL_NAME,
                                resize=False,
                                variable_image_size=True,
                                verbose=True)

    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=img_height, width=img_width)
    dataset_size = dataset.get_dataset_size()

    print("Number of images in the dataset:\t{:>6}".format(dataset_size))

    predict_generator = dataset.generate(
        batch_size=config.PREDICT_BATCH_SIZE,
        shuffle=True,
        transformations=[convert_to_3_channels, resize],
        label_encoder=None,
        returns={
            'processed_images', 'filenames', 'inverse_transform',
            'original_images', 'original_labels'
        },
        keep_images_without_gt=False)

    count = 0
    while count < dataset_size:
        batch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(
            predict_generator)
        i = 0
        print("Image:", batch_filenames[i])
        print()
        print("Ground truth boxes:\n")
        print(np.array(batch_original_labels[i]))

        y_pred = model.predict(batch_images)
        y_pred_decoded = decode_detections(y_pred,
                                           confidence_thresh=0.5,
                                           iou_threshold=0.4,
                                           top_k=200,
                                           normalize_coords=normalize_coords,
                                           img_height=img_height,
                                           img_width=img_width)
        y_pred_decoded_inv = apply_inverse_transforms(
            y_pred_decoded, batch_inverse_transforms)

        np.set_printoptions(precision=2, suppress=True, linewidth=90)
        print("Predicted boxes:\n")
        print('   class   conf xmin   ymin   xmax   ymax')
        print(y_pred_decoded_inv[i])
        # cv2.imshow('original image',batch_original_images[i])
        # cv2.waitKey(800)
        # cv2.destroyAllWindows()
        colors = plt.cm.hsv(np.linspace(0, 1, n_classes + 1)).tolist()
        plt.figure(figsize=(15, 8))
        plt.imshow(batch_original_images[i])

        current_axis = plt.gca()
        len_orig = 0
        for box in batch_original_labels[i]:
            len_orig += 1
            xmin = box[1]
            ymin = box[2]
            xmax = box[3]
            ymax = box[4]
            label = '{}'.format(classes[int(box[0])])
            current_axis.add_patch(
                plt.Rectangle((xmin, ymin),
                              xmax - xmin,
                              ymax - ymin,
                              color='green',
                              fill=False,
                              linewidth=2))
            current_axis.text(xmin,
                              ymin,
                              label,
                              size='x-large',
                              color='white',
                              bbox={
                                  'facecolor': 'green',
                                  'alpha': 1.0
                              })

        len_found = 0
        for box in y_pred_decoded_inv[i]:
            len_found += 1
            xmin = box[2]
            ymin = box[3]
            xmax = box[4]
            ymax = box[5]
            color = colors[int(box[0])]
            label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
            current_axis.add_patch(
                plt.Rectangle((xmin, ymin),
                              xmax - xmin,
                              ymax - ymin,
                              color=color,
                              fill=False,
                              linewidth=2))
            current_axis.text(xmin,
                              ymin,
                              label,
                              size='x-large',
                              color='white',
                              bbox={
                                  'facecolor': color,
                                  'alpha': 1.0
                              })

        print('Number of original boxes: {}'.format(len_orig))
        print('Number of found boxes: {}'.format(len_found))
        plt.show()
        count += 1
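
# Hypothetical driver for the function above (not in the original snippet):
# `config` is assumed to be an importable module exposing the attributes that
# test_config reads (ROOT_FOLDER, DATA_DIR, IMG_SHAPE, MODEL_NAME, CLASSES,
# PREDICT_BATCH_SIZE).
if __name__ == '__main__':
    import importlib
    config = importlib.import_module('config')  # hypothetical module name
    test_config(config)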
Example #3
0
def train_VOC(config):
    '''
    Train the given configuration; the configuration must be constructed
    according to the utility script found in utils/generateconfig.py.

    Arguments:
        config: the configuration of the model to use; it should already be
            loaded

    '''
    ###################################
    ### PATHS AND PARAMETERS
    ##################################
    datadir = config.DATA_DIR
    local_dir = config.ROOT_FOLDER
    img_shape = config.IMG_SHAPE
    classes = config.CLASSES
    checkpoint_output = os.path.join(local_dir, 'models',
                                     config.CHECKPOINT_NAME)
    model_output = os.path.join(local_dir, 'models', config.MODEL_NAME)
    img_height = img_shape[0]  # Height of the model input images
    img_width = img_shape[1]  # Width of the model input images
    img_channels = img_shape[2]  # Number of color channels of the model input images
    mean_color = [
        123, 117, 104
    ]  # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
    swap_channels = [
        2, 1, 0
    ]  # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
    n_classes = 20  # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
    scales_pascal = [
        0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05
    ]  # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
    scales = scales_pascal
    aspect_ratios = [
        [1.0, 2.0, 0.5],
        [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
        [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
        [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
        [1.0, 2.0, 0.5],
        [1.0, 2.0, 0.5]
    ]  # The anchor box aspect ratios used in the original SSD300; the order matters
    two_boxes_for_ar1 = True
    steps = [
        8, 16, 32, 64, 100, 300
    ]  # The space between two adjacent anchor box center points for each predictor layer.
    offsets = [
        0.5, 0.5, 0.5, 0.5, 0.5, 0.5
    ]  # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
    clip_boxes = False  # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
    variances = [
        0.1, 0.1, 0.2, 0.2
    ]  # The variances by which the encoded target coordinates are divided as in the original implementation
    normalize_coords = True
    batch_size = config.BATCH_SIZE  # Change the batch size if you like, or if you run into GPU memory issues.

    ###################################
    ### BUILDING MODEL
    ##################################
    K.clear_session()  # Clear previous models from memory.

    model = ssd_300(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='training',
                    l2_regularization=0.0005,
                    scales=scales,
                    aspect_ratios_per_layer=aspect_ratios,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=mean_color,
                    swap_channels=swap_channels)

    weights_path = os.path.join(local_dir, 'weights',
                                'VGG_VOC0712_SSD_300x300_iter_120000.h5')
    model.load_weights(weights_path, by_name=True)

    #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
    model.compile(optimizer=sgd, loss=ssd_loss.compute_loss)

    ###################################
    ### LOADING DATA
    ##################################
    train_dataset = DataGenerator(load_images_into_memory=False,
                                  hdf5_dataset_path=None)
    val_dataset = DataGenerator(load_images_into_memory=False,
                                hdf5_dataset_path=None)
    images_dir = os.path.join(datadir, 'Images')
    annotations_dir = os.path.join(datadir, 'Annotations')
    trainval_image_set_filename = os.path.join(datadir, 'ImageSets',
                                               'train.txt')
    test_image_set_filename = os.path.join(datadir, 'ImageSets', 'val.txt')

    # The XML parser needs to know what object class names to look for and in
    # which order to map them to integers.

    train_dataset.parse_xml(images_dirs=[images_dir],
                            image_set_filenames=[trainval_image_set_filename],
                            annotations_dirs=[annotations_dir],
                            classes=classes,
                            include_classes='all',
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)

    val_dataset.parse_xml(images_dirs=[images_dir],
                          image_set_filenames=[test_image_set_filename],
                          annotations_dirs=[annotations_dir],
                          classes=classes,
                          include_classes='all',
                          exclude_truncated=False,
                          exclude_difficult=True,
                          ret=False)

    train_dataset.create_hdf5_dataset(file_path='flowers_train.h5',
                                      resize=False,
                                      variable_image_size=True,
                                      verbose=True)

    val_dataset.create_hdf5_dataset(file_path='flowers_val.h5',
                                    resize=False,
                                    variable_image_size=True,
                                    verbose=True)

    ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
                                                img_width=img_width,
                                                background=mean_color)
    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=img_height, width=img_width)

    # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
    predictor_sizes = [
        model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
        model.get_layer('fc7_mbox_conf').output_shape[1:3],
        model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv9_2_mbox_conf').output_shape[1:3]
    ]

    ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                        img_width=img_width,
                                        n_classes=n_classes,
                                        predictor_sizes=predictor_sizes,
                                        scales=scales,
                                        aspect_ratios_per_layer=aspect_ratios,
                                        two_boxes_for_ar1=two_boxes_for_ar1,
                                        steps=steps,
                                        offsets=offsets,
                                        clip_boxes=clip_boxes,
                                        variances=variances,
                                        matching_type='multi',
                                        pos_iou_threshold=0.5,
                                        neg_iou_limit=0.5,
                                        normalize_coords=normalize_coords)

    train_generator = train_dataset.generate(
        batch_size=batch_size,
        shuffle=True,
        transformations=[ssd_data_augmentation],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    val_generator = val_dataset.generate(
        batch_size=batch_size,
        shuffle=False,
        transformations=[convert_to_3_channels, resize],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    # Get the number of samples in the training and validation datasets.
    train_dataset_size = train_dataset.get_dataset_size()
    val_dataset_size = val_dataset.get_dataset_size()

    print("Number of images in the training dataset:\t{:>6}".format(
        train_dataset_size))
    print("Number of images in the validation dataset:\t{:>6}".format(
        val_dataset_size))

    ###################################
    ### PREPARE TRAINING
    ##################################

    def lr_schedule(epoch):
        if epoch < 80:
            return 0.001
        elif epoch < 100:
            return 0.0001
        else:
            return 0.00001

    model_checkpoint = ModelCheckpoint(filepath=checkpoint_output,
                                       monitor='val_loss',
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=False,
                                       mode='auto',
                                       period=1)

    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0.0,
                                   patience=10,
                                   verbose=1)

    learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                    verbose=1)

    terminate_on_nan = TerminateOnNaN()

    callbacks = [
        model_checkpoint, learning_rate_scheduler, terminate_on_nan,
        early_stopping
    ]

    ###################################
    ### TRAINING
    ##################################
    epochs = config.EPOCHS
    steps_per_epoch = ceil(train_dataset_size / batch_size)
    model.summary()
    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=epochs,
                                  callbacks=callbacks,
                                  validation_data=val_generator,
                                  validation_steps=ceil(val_dataset_size /
                                                        batch_size))

    model.save(model_output)
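
    # Optional follow-up (an assumption, not in the original example): plot the
    # loss curves recorded in `history`, assuming matplotlib.pyplot is imported
    # as `plt` as in the other examples.
    plt.plot(history.history['loss'], label='train loss')
    plt.plot(history.history['val_loss'], label='val loss')
    plt.legend()
    plt.show()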
Example #4
def get_dataset(
    args: argparse.Namespace, model: Model
) -> Tuple[Iterable[List[np.array]], Iterable[List[np.array]], int]:
    train_dataset = DataGenerator(load_images_into_memory=False,
                                  hdf5_dataset_path=None)
    val_dataset = DataGenerator(load_images_into_memory=False,
                                hdf5_dataset_path=None)

    # Note: a leading '/' in the second argument would make os.path.join
    # discard args.data_dir, so the sub-paths below are given as relative paths.
    VOC_2007_images_dir = os.path.join(args.data_dir, 'VOC2007/JPEGImages/')
    VOC_2012_images_dir = os.path.join(args.data_dir, 'VOC2012/JPEGImages/')

    VOC_2007_annotations_dir = os.path.join(args.data_dir,
                                            'VOC2007/Annotations/')
    VOC_2012_annotations_dir = os.path.join(args.data_dir,
                                            'VOC2012/Annotations/')

    VOC_2007_trainval_image_set_filename = os.path.join(
        args.data_dir, 'VOC2007/ImageSets/Main/trainval.txt')
    VOC_2012_trainval_image_set_filename = os.path.join(
        args.data_dir, 'VOC2012/ImageSets/Main/trainval.txt')
    VOC_2007_test_image_set_filename = os.path.join(
        args.data_dir, 'VOC2007/ImageSets/Main/test.txt')

    classes = [
        'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
        'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
        'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
        'tvmonitor'
    ]

    train_dataset.parse_xml(
        images_dirs=[VOC_2007_images_dir, VOC_2012_images_dir],
        image_set_filenames=[
            VOC_2007_trainval_image_set_filename,
            VOC_2012_trainval_image_set_filename
        ],
        annotations_dirs=[VOC_2007_annotations_dir, VOC_2012_annotations_dir],
        classes=classes,
        include_classes='all',
        exclude_truncated=False,
        exclude_difficult=False,
        ret=False)

    val_dataset.parse_xml(
        images_dirs=[VOC_2007_images_dir],
        image_set_filenames=[VOC_2007_test_image_set_filename],
        annotations_dirs=[VOC_2007_annotations_dir],
        classes=classes,
        include_classes='all',
        exclude_truncated=False,
        exclude_difficult=True,
        ret=False)

    train_dataset.create_hdf5_dataset(
        file_path='dataset_pascal_voc_07+12_trainval.h5',
        resize=False,
        variable_image_size=True,
        verbose=True)

    val_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07_test.h5',
                                    resize=False,
                                    variable_image_size=True,
                                    verbose=True)

    ssd_data_augmentation = SSDDataAugmentation(img_height=args.img_height,
                                                img_width=args.img_width,
                                                background=args.mean_color)

    # For the validation generator:
    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=args.img_height, width=args.img_width)

    # 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.

    # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
    predictor_sizes = [
        model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
        model.get_layer('fc7_mbox_conf').output_shape[1:3],
        model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv9_2_mbox_conf').output_shape[1:3]
    ]

    ssd_input_encoder = SSDInputEncoder(
        img_height=args.img_height,
        img_width=args.img_width,
        n_classes=args.n_classes,
        predictor_sizes=predictor_sizes,
        scales=args.scales,
        aspect_ratios_per_layer=args.aspect_ratios,
        two_boxes_for_ar1=args.two_boxes_for_ar1,
        steps=args.steps,
        offsets=args.offsets,
        clip_boxes=args.clip_boxes,
        variances=args.variances,
        matching_type='multi',
        pos_iou_threshold=0.5,
        neg_iou_limit=0.5,
        normalize_coords=args.normalize_coords)

    train_generator = train_dataset.generate(
        batch_size=args.batch_size,
        shuffle=True,
        transformations=[ssd_data_augmentation],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    val_generator = val_dataset.generate(
        batch_size=args.batch_size,
        shuffle=False,
        transformations=[convert_to_3_channels, resize],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)
    return train_generator, val_generator, val_dataset.get_dataset_size()
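
# A usage sketch for get_dataset (an addition; assumptions: `args` is an
# argparse.Namespace carrying the fields read above plus a hypothetical
# `epochs`, `model` is a compiled SSD300, and `ceil` comes from `math`):
#
#   train_gen, val_gen, n_val = get_dataset(args, model)
#   model.fit_generator(generator=train_gen,
#                       steps_per_epoch=1000,  # hypothetical value
#                       epochs=args.epochs,
#                       validation_data=val_gen,
#                       validation_steps=ceil(n_val / args.batch_size))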
Example #5
VOC_2007_images_dir = 'VOCdevkit/PETS/JPEGImages/'
VOC_2007_annotations_dir = 'VOCdevkit/PETS/Annotations/'
VOC_2007_test_image_set_filename = 'VOCdevkit/PETS/ImageSets/Main/view7_bc.txt'

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = [
    'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]

dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
                  image_set_filenames=[VOC_2007_test_image_set_filename],
                  annotations_dirs=[VOC_2007_annotations_dir],
                  classes=classes,
                  include_classes='all',
                  exclude_truncated=False,
                  exclude_difficult=True,
                  ret=False)

convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)

generator = dataset.generate(batch_size=1,
                             shuffle=True,
                             transformations=[convert_to_3_channels, resize],
                             returns={
                                 'processed_images', 'filenames',
                                 'inverse_transform', 'original_images',
                                 'original_labels', 'masks'
                             })
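
# Consuming one batch from the generator above (a sketch; the unpacking order
# mirrors Example #2 and assumes this fork yields 'masks' last):
batch_images, batch_filenames, batch_inverse_transforms, \
    batch_original_images, batch_original_labels, batch_masks = next(generator)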
Example #6
VOC_2013_sampleweights_dir = '/data/deeplearn/VOCdevkit/VOC2013/Weights/'  # needed by parse_xml below
VOC_2013_images_dir = '/data/deeplearn/VOCdevkit/URPC2018/JPEGImages/'
VOC_2013_annotations_dir = '/data/deeplearn/VOCdevkit/URPC2018/Annotations/'
VOC_2013_trainval_image_set_filename = '/data/deeplearn/VOCdevkit/URPC2018/ImageSets/Main/trainval.txt'
VOC_2013_test_image_set_filename = '/data/deeplearn/VOCdevkit/URPC2018/ImageSets/Main/test.txt'
classes = ['background', 'seacucumber', 'seaurchin', 'scallop', 'starfish']

train_dataset = DataGenerator(load_images_into_memory=True,
                              hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=True,
                            hdf5_dataset_path=None)
train_dataset.parse_xml(
    images_dirs=[VOC_2013_images_dir],
    image_set_filenames=[VOC_2013_trainval_image_set_filename],
    sample_weights_dirs=VOC_2013_sampleweights_dir,
    annotations_dirs=[VOC_2013_annotations_dir],
    classes=classes,
    include_classes='all',
    exclude_truncated=False,
    exclude_difficult=False,
    ret=False)
val_dataset.parse_xml(images_dirs=[VOC_2013_images_dir],
                      image_set_filenames=[VOC_2013_test_image_set_filename],
                      annotations_dirs=[VOC_2013_annotations_dir],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=True,
                      ret=False)
train_dataset.create_hdf5_dataset(
    file_path='dataset_pascal_voc_URPC2018_trainval.h5',
    resize=False,
    variable_image_size=True,
    verbose=True)
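
# Optional sketch (an addition): once written, the .h5 file can be passed back
# to the constructor so later runs skip the XML parsing step, mirroring the
# `hdf5_dataset_path` argument used throughout these examples.
# train_dataset = DataGenerator(load_images_into_memory=True,
#                               hdf5_dataset_path='dataset_pascal_voc_URPC2018_trainval.h5')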
"""images_dir = '/home/rblin/Documents/BD_QCAV/test/RGB_test'
annotations_dir = '/home/rblin/Documents/BD_QCAV/test/RGB_LABELS_test'
image_set_filename = '/home/rblin/Documents/BD_QCAV/test/rgb_rduced.txt'"""
"""images_dir = '/home/rblin/Images/BD_ITSC/test_rgb/RGB'
annotations_dir = '/home/rblin/Images/BD_ITSC/test_rgb/LABELS'
image_set_filename = '/home/rblin/Images/BD_ITSC/test_rgb/test_rgb.txt'"""

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background', 'person', 'car', 'bike', 'motorbike']
#classes = ['background', 'person', 'car']

dataset.parse_xml(
    images_dirs=[images_dir],
    image_set_filenames=[image_set_filename],
    annotations_dirs=[annotations_dir],
    classes=classes[:4],
    #include_classes='all',
    include_classes=[0, 1, 2],
    exclude_truncated=False,
    exclude_difficult=False,
    ret=False)

evaluator = Evaluator(model=model,
                      n_classes=2,
                      data_generator=dataset,
                      model_mode=model_mode)

results = evaluator(img_height=img_height,
                    img_width=img_width,
                    batch_size=8,
                    data_generator_mode='resize',
                    round_confidences=False)
	# The directories that contain the annotations.
	annotations_dir      = '/home/docker/Jessi/smart-traffic-sensor-lab/vehicles-dataset/annotations/'

	# The paths to the image sets.
	train_image_set_filename    = '/home/docker/Jessi/ssd_keras-1/train.txt'
	test_image_set_filename      = '/home/docker/Jessi/ssd_keras-1/test.txt'

	classes = ['None','motorcycle','car', 'van', 'bus', 'truck',
		   'small-truck', 'tank-truck']

	train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
	val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
	num_car, num_motorcycle, num_van, num_bus, num_truck, num_small_truck, num_tank_truck = \
		train_dataset.parse_xml(images_dirs=[images_dir],
		                        image_set_filenames=[train_image_set_filename],
		                        annotations_dirs=[annotations_dir],
		                        classes=classes,
		                        include_classes='all',
		                        exclude_truncated=False,
		                        exclude_difficult=False,
		                        ret=True)
	print(num_car, num_motorcycle, num_van, num_bus, num_truck, num_small_truck, num_tank_truck)
	# 6: Create the validation set batch generator (if you want to use a validation dataset)

	
	val_dataset.parse_xml(images_dirs=[images_dir],
	                      image_set_filenames=[test_image_set_filename],
	                      annotations_dirs=[annotations_dir],
	                      classes=classes,
	                      include_classes='all',
	                      exclude_truncated=False,
	                      exclude_difficult=False,
	                      ret=False)
def _main_(args):

    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    path_imgs_training = config['train']['train_image_folder']
    path_anns_training = config['train']['train_annot_folder']
    path_imgs_val = config['valid']['valid_image_folder']
    path_anns_val = config['valid']['valid_annot_folder']
    labels = config['model']['labels']
    categories = {}
    #categories = {"Razor": 1, "Gun": 2, "Knife": 3, "Shuriken": 4}  # category 0 is the background
    for i, label in enumerate(labels):
        categories[label] = i + 1
    print('\nTraining on: \t' + str(categories) + '\n')

    ####################################
    #   Parameters
    ###################################
    #%%
    img_height = config['model']['input']  # Height of the model input images
    img_width = config['model']['input']  # Width of the model input images
    img_channels = 3  # Number of color channels of the model input images
    mean_color = [
        123, 117, 104
    ]  # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
    swap_channels = [
        2, 1, 0
    ]  # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
    n_classes = len(
        labels
    )  # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
    scales_pascal = [
        0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05
    ]  # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
    #scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
    scales = scales_pascal
    aspect_ratios = [
        [1.0, 2.0, 0.5],
        [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
        [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
        [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
        [1.0, 2.0, 0.5],
        [1.0, 2.0, 0.5]
    ]  # The anchor box aspect ratios used in the original SSD300; the order matters
    two_boxes_for_ar1 = True
    steps = [
        8, 16, 32, 64, 100, 300
    ]  # The space between two adjacent anchor box center points for each predictor layer.
    offsets = [
        0.5, 0.5, 0.5, 0.5, 0.5, 0.5
    ]  # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
    clip_boxes = False  # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
    variances = [
        0.1, 0.1, 0.2, 0.2
    ]  # The variances by which the encoded target coordinates are divided as in the original implementation
    normalize_coords = True

    K.clear_session()  # Clear previous models from memory.

    model_path = config['train']['saved_weights_name']
    # 3: Instantiate an optimizer and the SSD loss function and compile the model.
    #    If you want to follow the original Caffe implementation, use the preset SGD
    #    optimizer, otherwise I'd recommend the commented-out Adam optimizer.

    if config['model']['backend'] == 'ssd512':
        aspect_ratios = [[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                         [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                         [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                         [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0], [1.0, 2.0, 0.5],
                         [1.0, 2.0, 0.5]]
        steps = [
            8, 16, 32, 64, 100, 200, 300
        ]  # The space between two adjacent anchor box center points for each predictor layer.
        offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
        scales = [0.07, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05]

    elif config['model']['backend'] == 'ssd7':
        #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'
        scales = [
            0.08, 0.16, 0.32, 0.64, 0.96
        ]  # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.
        aspect_ratios = [0.5, 1.0,
                         2.0]  # The list of aspect ratios for the anchor boxes
        two_boxes_for_ar1 = True  # Whether or not you want to generate two anchor boxes for aspect ratio 1
        steps = None  # In case you'd like to set the step sizes for the anchor box grids manually; not recommended
        offsets = None

    if os.path.exists(model_path):
        print("\nLoading pretrained weights.\n")
        # We need to create an SSDLoss object in order to pass that to the model loader.
        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

        K.clear_session()  # Clear previous models from memory.
        model = load_model(model_path,
                           custom_objects={
                               'AnchorBoxes': AnchorBoxes,
                               'L2Normalization': L2Normalization,
                               'compute_loss': ssd_loss.compute_loss
                           })

    else:
        ####################################
        #   Build the Keras model.
        ###################################

        if config['model']['backend'] == 'ssd300':
            #weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'
            from models.keras_ssd300 import ssd_300 as ssd

            model = ssd_300(image_size=(img_height, img_width, img_channels),
                            n_classes=n_classes,
                            mode='training',
                            l2_regularization=0.0005,
                            scales=scales,
                            aspect_ratios_per_layer=aspect_ratios,
                            two_boxes_for_ar1=two_boxes_for_ar1,
                            steps=steps,
                            offsets=offsets,
                            clip_boxes=clip_boxes,
                            variances=variances,
                            normalize_coords=normalize_coords,
                            subtract_mean=mean_color,
                            swap_channels=swap_channels)

        elif config['model']['backend'] == 'ssd512':
            #weights_path = 'VGG_VOC0712Plus_SSD_512x512_ft_iter_160000.h5'
            from models.keras_ssd512 import ssd_512 as ssd

            # 2: Load some weights into the model.
            model = ssd(image_size=(img_height, img_width, img_channels),
                        n_classes=n_classes,
                        mode='training',
                        l2_regularization=0.0005,
                        scales=scales,
                        aspect_ratios_per_layer=aspect_ratios,
                        two_boxes_for_ar1=two_boxes_for_ar1,
                        steps=steps,
                        offsets=offsets,
                        clip_boxes=clip_boxes,
                        variances=variances,
                        normalize_coords=normalize_coords,
                        swap_channels=swap_channels)

        elif config['model']['backend'] == 'ssd7':
            #weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'
            from models.keras_ssd7 import build_model as ssd
            scales = [
                0.08, 0.16, 0.32, 0.64, 0.96
            ]  # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.
            aspect_ratios = [
                0.5, 1.0, 2.0
            ]  # The list of aspect ratios for the anchor boxes
            two_boxes_for_ar1 = True  # Whether or not you want to generate two anchor boxes for aspect ratio 1
            steps = None  # In case you'd like to set the step sizes for the anchor box grids manually; not recommended
            offsets = None
            model = ssd(image_size=(img_height, img_width, img_channels),
                        n_classes=n_classes,
                        mode='training',
                        l2_regularization=0.0005,
                        scales=scales,
                        aspect_ratios_global=aspect_ratios,
                        aspect_ratios_per_layer=None,
                        two_boxes_for_ar1=two_boxes_for_ar1,
                        steps=steps,
                        offsets=offsets,
                        clip_boxes=clip_boxes,
                        variances=variances,
                        normalize_coords=normalize_coords,
                        subtract_mean=None,
                        divide_by_stddev=None)

        else:
            print('Wrong Backend')

        print('OK create model')
        #sgd = SGD(lr=config['train']['learning_rate'], momentum=0.9, decay=0.0, nesterov=False)

        # TODO: Set the path to the weights you want to load. only for ssd300 or ssd512

        weights_path = 'VGG_ILSVRC_16_layers_fc_reduced.h5'
        print("\nLoading pretrained weights VGG.\n")
        model.load_weights(weights_path, by_name=True)

        # 3: Instantiate an optimizer and the SSD loss function and compile the model.
        #    If you want to follow the original Caffe implementation, use the preset SGD
        #    optimizer, otherwise I'd recommend the commented-out Adam optimizer.

        #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        #sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
        optimizer = Adam(lr=config['train']['learning_rate'],
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
        model.compile(optimizer=optimizer, loss=ssd_loss.compute_loss)

        model.summary()

    #####################################################################
    #  Instantiate two `DataGenerator` objects: One for training, one for validation.
    ######################################################################
    # Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.

    train_dataset = DataGenerator(load_images_into_memory=False,
                                  hdf5_dataset_path=None)
    val_dataset = DataGenerator(load_images_into_memory=False,
                                hdf5_dataset_path=None)

    # 2: Parse the image and label lists for the training and validation datasets. This can take a while.

    # The XML parser needs to know what object class names to look for and in which order to map them to integers.
    classes = ['background'] + labels

    train_dataset.parse_xml(
        images_dirs=[config['train']['train_image_folder']],
        image_set_filenames=[config['train']['train_image_set_filename']],
        annotations_dirs=[config['train']['train_annot_folder']],
        classes=classes,
        include_classes='all',
        exclude_truncated=False,
        exclude_difficult=False,
        ret=False)

    val_dataset.parse_xml(
        images_dirs=[config['valid']['valid_image_folder']],
        image_set_filenames=[config['valid']['valid_image_set_filename']],
        annotations_dirs=[config['valid']['valid_annot_folder']],
        classes=classes,
        include_classes='all',
        exclude_truncated=False,
        exclude_difficult=False,
        ret=False)

    #########################
    # 3: Set the batch size.
    #########################
    batch_size = config['train']['batch_size']  # Change the batch size if you like, or if you run into GPU memory issues.

    ##########################
    # 4: Set the image transformations for pre-processing and data augmentation options.
    ##########################
    # For the training generator:

    # For the validation generator:
    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=img_height, width=img_width)

    ######################################
    # 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
    #########################################
    # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
    if config['model']['backend'] == 'ssd512':
        predictor_sizes = [
            model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
            model.get_layer('fc7_mbox_conf').output_shape[1:3],
            model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv9_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv10_2_mbox_conf').output_shape[1:3]
        ]

        ssd_input_encoder = SSDInputEncoder(
            img_height=img_height,
            img_width=img_width,
            n_classes=n_classes,
            predictor_sizes=predictor_sizes,
            scales=scales,
            aspect_ratios_per_layer=aspect_ratios,
            two_boxes_for_ar1=two_boxes_for_ar1,
            steps=steps,
            offsets=offsets,
            clip_boxes=clip_boxes,
            variances=variances,
            matching_type='multi',
            pos_iou_threshold=0.5,
            neg_iou_limit=0.5,
            normalize_coords=normalize_coords)

    elif config['model']['backend'] == 'ssd300':
        predictor_sizes = [
            model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
            model.get_layer('fc7_mbox_conf').output_shape[1:3],
            model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv9_2_mbox_conf').output_shape[1:3]
        ]
        ssd_input_encoder = SSDInputEncoder(
            img_height=img_height,
            img_width=img_width,
            n_classes=n_classes,
            predictor_sizes=predictor_sizes,
            scales=scales,
            aspect_ratios_per_layer=aspect_ratios,
            two_boxes_for_ar1=two_boxes_for_ar1,
            steps=steps,
            offsets=offsets,
            clip_boxes=clip_boxes,
            variances=variances,
            matching_type='multi',
            pos_iou_threshold=0.5,
            neg_iou_limit=0.5,
            normalize_coords=normalize_coords)

    elif config['model']['backend'] == 'ssd7':
        predictor_sizes = [
            model.get_layer('classes4').output_shape[1:3],
            model.get_layer('classes5').output_shape[1:3],
            model.get_layer('classes6').output_shape[1:3],
            model.get_layer('classes7').output_shape[1:3]
        ]
        ssd_input_encoder = SSDInputEncoder(
            img_height=img_height,
            img_width=img_width,
            n_classes=n_classes,
            predictor_sizes=predictor_sizes,
            scales=scales,
            aspect_ratios_global=aspect_ratios,
            two_boxes_for_ar1=two_boxes_for_ar1,
            steps=steps,
            offsets=offsets,
            clip_boxes=clip_boxes,
            variances=variances,
            matching_type='multi',
            pos_iou_threshold=0.5,
            neg_iou_limit=0.3,
            normalize_coords=normalize_coords)

    #######################
    # 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
    #######################

    train_generator = train_dataset.generate(
        batch_size=batch_size,
        shuffle=True,
        transformations=[
            SSDDataAugmentation(img_height=img_height, img_width=img_width)
        ],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    val_generator = val_dataset.generate(
        batch_size=batch_size,
        shuffle=False,
        transformations=[convert_to_3_channels, resize],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    # Get the number of samples in the training and validation datasets.
    train_dataset_size = train_dataset.get_dataset_size()
    val_dataset_size = val_dataset.get_dataset_size()

    print("Number of images in the training dataset:\t{:>6}".format(
        train_dataset_size))
    print("Number of images in the validation dataset:\t{:>6}".format(
        val_dataset_size))

    ##########################
    # Define model callbacks.
    #########################

    # TODO: Set the filepath under which you want to save the model.
    model_checkpoint = ModelCheckpoint(
        filepath=config['train']['saved_weights_name'],
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)
    #model_checkpoint.best =

    csv_logger = CSVLogger(filename='log.csv', separator=',', append=True)
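
    # The original snippet references `lr_schedule` below without defining it;
    # this assumed definition mirrors the schedule from the train_VOC example.
    def lr_schedule(epoch):
        if epoch < 80:
            return 0.001
        elif epoch < 100:
            return 0.0001
        else:
            return 0.00001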

    learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                    verbose=1)

    terminate_on_nan = TerminateOnNaN()

    callbacks = [
        model_checkpoint, csv_logger, learning_rate_scheduler, terminate_on_nan
    ]

    #print(model.summary())
    batch_images, batch_labels = next(train_generator)

    #    i = 0 # Which batch item to look at
    #
    #    print("Image:", batch_filenames[i])
    #    print()
    #    print("Ground truth boxes:\n")
    #    print(batch_labels[i])

    initial_epoch = 0
    final_epoch = config['train']['nb_epochs']
    #final_epoch     = 20
    steps_per_epoch = 500

    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=final_epoch,
                                  callbacks=callbacks,
                                  validation_data=val_generator,
                                  validation_steps=ceil(val_dataset_size /
                                                        batch_size),
                                  initial_epoch=initial_epoch,
                                  verbose=1 if config['train']['debug'] else 2)
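
if __name__ == '__main__':
    # Hypothetical entry point (not in the original snippet): `_main_` only
    # needs an object with a `conf` attribute pointing at the JSON config file.
    import argparse
    argparser = argparse.ArgumentParser(description='Train an SSD model.')
    argparser.add_argument('-c', '--conf', help='path to the configuration file')
    _main_(argparser.parse_args())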
def main():
    # create dataset
    dataset = DataGenerator()
    dataset.parse_xml(images_dirs=[dataset_images_dir],
                      image_set_filenames=[test_image_set_filename],
                      annotations_dirs=[dataset_annotations_dir],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=False,
                      ret=False)

    # create model
    model = ssd_300(
        image_size=(img_height, img_width, 3),
        n_classes=n_classes,
        mode=model_mode,
        l2_regularization=0.0005,
        scales=[
            0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05
        ],  # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
        aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                 [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                                 [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                                 [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                                 [1.0, 2.0, 0.5], [1.0, 2.0, 0.5]],
        two_boxes_for_ar1=True,
        steps=None,
        offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        clip_boxes=False,
        variances=[0.1, 0.1, 0.2, 0.2],
        normalize_coords=True,
        subtract_mean=[123, 117, 104],
        swap_channels=[2, 1, 0],
        confidence_thresh=1.0e-4,
        iou_threshold=0.45,
        top_k=200,
        nms_max_output_size=400)

    # load weights and compile it
    model.load_weights(weights_path, by_name=True)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
    model.compile(optimizer=adam, loss=ssd_loss.compute_loss)

    evaluator = Evaluator(model=model,
                          n_classes=n_classes,
                          data_generator=dataset,
                          model_mode=model_mode)

    results = evaluator(img_height=img_height,
                        img_width=img_width,
                        batch_size=8,
                        data_generator_mode='resize',
                        round_confidences=False,
                        matching_iou_threshold=0.5,
                        border_pixels='include',
                        sorting_algorithm='quicksort',
                        average_precision_mode='sample',
                        num_recall_points=11,
                        ignore_neutral_boxes=True,
                        return_precisions=True,
                        return_recalls=True,
                        return_average_precisions=True,
                        verbose=True)

    mean_average_precision, average_precisions, precisions, recalls = results

    for i in range(1, len(average_precisions)):
        print("{:<14}{:<6}{}".format(classes[i], 'AP',
                                     round(average_precisions[i], 3)))
    print()
    print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))

    m = max((n_classes + 1) // 2, 2)
    n = 2

    fig, cells = plt.subplots(m, n, figsize=(n * 8, m * 8))
    for i in range(m):
        for j in range(n):
            if n * i + j + 1 > n_classes: break
            cells[i, j].plot(recalls[n * i + j + 1],
                             precisions[n * i + j + 1],
                             color='blue',
                             linewidth=1.0)
            cells[i, j].set_xlabel('recall', fontsize=14)
            cells[i, j].set_ylabel('precision', fontsize=14)
            cells[i, j].grid(True)
            cells[i, j].set_xticks(np.linspace(0, 1, 6))
            cells[i, j].set_yticks(np.linspace(0, 1, 6))
            cells[i, j].set_xlim(0.0, 1.0)
            cells[i, j].set_ylim(0.0, 1.0)
            cells[i, j].set_title("{}, AP: {:.3f}".format(
                classes[n * i + j + 1], average_precisions[n * i + j + 1]),
                                  fontsize=16)

    if not os.path.isdir("evaluate_result"):
        os.makedirs("evaluate_result")

    plt.savefig('evaluate_result/ssd300_face_detection.png')
    # The directories that contain the annotations.
    Cityscapes_annotation_dir = '../../datasets/Cityscapes/Annotations'

    # The paths to the image sets.
    Cityscapes_train_source_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_source.txt'
    Cityscapes_train_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_target.txt'
    Cityscapes_test_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/test.txt'

    # images_dirs, image_set_filenames, and annotations_dirs should have the same length
    train_dataset.parse_xml(images_dirs=[Cityscapes_images_dir,
                                         Cityscapes_images_dir],
                            image_set_filenames=[Cityscapes_train_source_image_set_filename,
                                                 Cityscapes_train_target_image_set_filename],
                            annotations_dirs=[Cityscapes_annotation_dir,
                                              Cityscapes_annotation_dir],
                            classes=classes,
                            include_classes='all',
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)

    val_dataset.parse_xml(images_dirs=[Cityscapes_images_dir],
                          image_set_filenames=[Cityscapes_test_target_image_set_filename],
                          annotations_dirs=[Cityscapes_annotation_dir],
                          classes=classes,
                          include_classes='all',
                          exclude_truncated=False,
                          exclude_difficult=True,
                          ret=False)
# TODO: Set the paths to the datasets here.

IMAGES_DIR = getenv("IMAGES_DIR")
ANNOTATIONS_DIR = getenv("ANNOTATIONS_DIR")

TRAIN_IMAGE_SET_FILENAME = getenv("TRAIN_IMAGESET_FILENAME")
VAL_IMAGE_SET_FILENAME = getenv("VAL_IMAGESET_FILENAME")
TEST_IMAGE_SET_FILENAME = getenv("TEST_IMAGESET_FILENAME")

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background', '0_1', '0_2', '0_3', 'p', 'g']

train_dataset.parse_xml(images_dirs=[IMAGES_DIR],
                        image_set_filenames=[TRAIN_IMAGE_SET_FILENAME],
                        annotations_dirs=[ANNOTATIONS_DIR],
                        classes=classes,
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False)

val_dataset.parse_xml(images_dirs=[IMAGES_DIR],
                      image_set_filenames=[VAL_IMAGE_SET_FILENAME],
                      annotations_dirs=[ANNOTATIONS_DIR],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=True,
                      ret=False)

# 3: Set the batch size.
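batch_size = 32  # hypothetical value (the original snippet is truncated here); lower it if you hit GPU memory limits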
Example #13
def run(train_dir, valid_dir, set_dir, model_dir):
    # train_dir = arguments.train_dir
    # valid_dir = arguments.valid_dir

    train_dataset_dir = train_dir
    train_annot_dir = train_dir + '/annot/'
    train_set = train_dir + '/img_set.txt'

    valid_dataset_dir = valid_dir
    valid_annot_dir = valid_dir + '/annot/'
    valid_set = valid_dir + '/valid_set.txt'

    # Set Training and Validation dataset paths
    batch_size = 16
    print('Using batch size of: {}'.format(batch_size))
    #model_path = 'COCO_512.h5'
    model_path = model_dir
    # model_path = 'saved_model.h5'
    # Needs to know classes and order to map to integers
    classes = ['background', 'car', 'bus', 'truck']
    # Set required parameters for training of SSD
    img_height = 512
    img_width = 512
    img_channels = 3  # Colour image
    mean_color = [123, 117, 104]  # DO NOT CHANGE
    swap_channels = [2, 1, 0]  # Original SSD used BGR
    n_classes = 3  # Number of positive classes (e.g. 80 for COCO)
    scales_coco = [0.04, 0.1, 0.26, 0.42, 0.58, 0.74, 0.9, 1.06]
    scales = scales_coco
    aspect_ratios = [[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                     [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0], [1.0, 2.0, 0.5],
                     [1.0, 2.0, 0.5]]
    two_boxes_for_ar1 = True
    steps = [8, 16, 32, 64, 128, 256, 512]
    offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    clip_boxes = False
    variances = [0.1, 0.1, 0.2, 0.2]
    normalize_coords = True
    K.clear_session()

    model = ssd_512(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='training',
                    l2_regularization=0.0005,
                    scales=scales,
                    aspect_ratios_per_layer=aspect_ratios,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=mean_color,
                    swap_channels=swap_channels)
    model.load_weights(model_path, by_name=True)

    sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

    model.compile(optimizer=sgd, loss=ssd_loss.compute_loss)

    # model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
    #                                   'L2Normalization': L2Normalization,

    #                                   'compute_loss': ssd_loss.compute_loss})
    # Create Data Generators for train and valid sets
    train_dataset = DataGenerator(load_images_into_memory=False,
                                  hdf5_dataset_path=None)
    valid_dataset = DataGenerator(load_images_into_memory=False,
                                  hdf5_dataset_path=None)
    train_dataset.parse_xml(images_dirs=[train_dataset_dir],
                            image_set_filenames=[train_set],
                            annotations_dirs=[train_annot_dir],
                            classes=classes,
                            include_classes='all',
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)

    valid_dataset.parse_xml(images_dirs=[valid_dataset_dir],
                            image_set_filenames=[valid_set],
                            annotations_dirs=[valid_annot_dir],
                            classes=classes,
                            include_classes='all',
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)

    # Will speed up training but requires more memory
    # Can comment out to avoid memory requirements
    '''
    train_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07+12_trainval.h5',
                                      resize=False,
                                      variable_image_size=True,
                                      verbose=True)

    valid_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07_test.h5',
                                      resize=False,
                                      variable_image_size=True,
                                      verbose=True)
    '''
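
    # A sketch of how the optional HDF5 datasets are typically reused on later runs
    # (an assumption based on the commented block above, not code from the original):
    # once the .h5 files exist, XML parsing can be skipped entirely by passing
    # hdf5_dataset_path to the DataGenerator constructor instead:
    # train_dataset = DataGenerator(hdf5_dataset_path='dataset_pascal_voc_07+12_trainval.h5')
    # valid_dataset = DataGenerator(hdf5_dataset_path='dataset_pascal_voc_07_test.h5')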

    ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
                                                img_width=img_width,
                                                background=mean_color)

    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=img_height, width=img_width)

    predictor_sizes = [
        model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
        model.get_layer('fc7_mbox_conf').output_shape[1:3],
        model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv9_2_mbox_conf').output_shape[1:3],
        model.get_layer('conv10_2_mbox_conf').output_shape[1:3]
    ]

    ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                        img_width=img_width,
                                        n_classes=n_classes,
                                        predictor_sizes=predictor_sizes,
                                        scales=scales,
                                        aspect_ratios_per_layer=aspect_ratios,
                                        two_boxes_for_ar1=two_boxes_for_ar1,
                                        steps=steps,
                                        offsets=offsets,
                                        clip_boxes=clip_boxes,
                                        variances=variances,
                                        matching_type='multi',
                                        pos_iou_threshold=0.5,
                                        neg_iou_limit=0.5,
                                        normalize_coords=normalize_coords)

    train_generator = train_dataset.generate(
        batch_size=batch_size,
        shuffle=True,
        transformations=[ssd_data_augmentation],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    val_generator = valid_dataset.generate(
        batch_size=batch_size,
        shuffle=False,
        transformations=[convert_to_3_channels, resize],
        label_encoder=ssd_input_encoder,
        returns={'processed_images', 'encoded_labels'},
        keep_images_without_gt=False)

    # Get the number of samples in the training and validations datasets.
    train_dataset_size = train_dataset.get_dataset_size()
    valid_dataset_size = valid_dataset.get_dataset_size()

    print("Number of images in the training dataset:\t{:>6}".format(
        train_dataset_size))
    print("Number of images in the validation dataset:\t{:>6}".format(
        valid_dataset_size))

    model_checkpoint = ModelCheckpoint(
        filepath=
        'ssd_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)

    # The CSV logger must be defined, since it is included in the callbacks list below.
    csv_logger = CSVLogger(filename='ssd512_training_log.csv',
                           separator=',',
                           append=True)

    learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                    verbose=1)

    terminate_on_nan = TerminateOnNaN()

    callbacks = [
        model_checkpoint, csv_logger, learning_rate_scheduler, terminate_on_nan
    ]

    #callbacks = [learning_rate_scheduler,
    #             terminate_on_nan]

    initial_epoch = 0
    final_epoch = 150  # 150
    steps_per_epoch = math.ceil(train_dataset_size /
                                batch_size)  # ceil(num_samples/batch_size); the original hard-coded 119 samples

    # Training
    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=final_epoch,
                                  callbacks=callbacks,
                                  validation_data=val_generator,
                                  validation_steps=math.ceil(
                                      valid_dataset_size / batch_size),
                                  initial_epoch=initial_epoch)

    # Save final trained model
    model.save('trained.h5')

    # Make predictions
    predict_generator = valid_dataset.generate(
        batch_size=1,
        shuffle=True,
        transformations=[convert_to_3_channels, resize],
        label_encoder=None,
        returns={
            'processed_images', 'filenames', 'inverse_transform',
            'original_images', 'original_labels'
        },
        keep_images_without_gt=False)

    batch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(
        predict_generator)

    i = 0  # Which batch item to look at

    print("Image:", batch_filenames[i])
    print()
    print("Ground truth boxes:\n")
    print(np.array(batch_original_labels[i]))

    y_pred = model.predict(batch_images)
    y_pred_decoded = decode_detections(y_pred,
                                       confidence_thresh=0.2,
                                       iou_threshold=0.4,
                                       top_k=200,
                                       normalize_coords=normalize_coords,
                                       img_height=img_height,
                                       img_width=img_width)

    y_pred_decoded_inv = apply_inverse_transforms(y_pred_decoded,
                                                  batch_inverse_transforms)

    np.set_printoptions(precision=2, suppress=True, linewidth=90)
    print("Predicted boxes:\n")
    print('   class   conf xmin   ymin   xmax   ymax')
    print(y_pred_decoded_inv[i])

    # Set the colors for the bounding boxes
    colors = plt.cm.hsv(np.linspace(0, 1, n_classes + 1)).tolist()
    # classes = ['background', 'car', 'bus', 'truck', 'motorbike'] # Already set at start

    plt.figure(figsize=(20, 12))
    plt.imshow(batch_original_images[i])

    current_axis = plt.gca()

    for box in batch_original_labels[i]:
        xmin = box[1]
        ymin = box[2]
        xmax = box[3]
        ymax = box[4]
        label = '{}'.format(classes[int(box[0])])
        current_axis.add_patch(
            plt.Rectangle((xmin, ymin),
                          xmax - xmin,
                          ymax - ymin,
                          color='green',
                          fill=False,
                          linewidth=2))
        current_axis.text(xmin,
                          ymin,
                          label,
                          size='x-large',
                          color='white',
                          bbox={
                              'facecolor': 'green',
                              'alpha': 1.0
                          })

    for box in y_pred_decoded_inv[i]:
        xmin = box[2]
        ymin = box[3]
        xmax = box[4]
        ymax = box[5]
        color = colors[int(box[0])]
        label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
        current_axis.add_patch(
            plt.Rectangle((xmin, ymin),
                          xmax - xmin,
                          ymax - ymin,
                          color=color,
                          fill=False,
                          linewidth=2))
        current_axis.text(xmin,
                          ymin,
                          label,
                          size='x-large',
                          color='white',
                          bbox={
                              'facecolor': color,
                              'alpha': 1.0
                          })

    plt.show()

    return
Example #14
def data_generator_func(config: Dict):
    """Data Generator for training data and validation data
    
    Parameters
    ----------
    config : Dict
        Config yaml/json containing all parameter
    
    Returns
    -------
        train_dataset, val_dataset
    """
    # Init DataGenerator
    start_data = timer()
    train_dataset = DataGenerator(load_images_into_memory=config['training']
                                  ['train_load_images_into_memory'],
                                  hdf5_dataset_path=None)
    val_dataset = DataGenerator(load_images_into_memory=config['training']
                                ['validation_load_images_into_memory'],
                                hdf5_dataset_path=None)
    if config['training']['train_load_images_into_memory'] is not False:
        print("[INFO]... You have chosen to load data into memory")
    else:
        print(
            "[WARNING]... You have chosen not to load data into memory. It will still work but will be much slower"
        )

    train_img_dir = config['training']['train_img_dir']
    val_img_dir = config['training']['val_img_dir']

    train_annotation_dir = config['training']['train_annotation_dir']
    val_annotation_dir = config['training']['val_annotation_dir']

    train_image_set_filename = config['training']['train_image_set_filename']
    val_image_set_filename = config['training']['val_image_set_filename']

    classes = config['training']['classes']

    if config['training']['annotation_type'] == 'xml':
        train_dataset.parse_xml(images_dirs=[train_img_dir],
                                image_set_filenames=[train_image_set_filename],
                                annotations_dirs=[train_annotation_dir],
                                classes=classes,
                                include_classes='all',
                                exclude_truncated=False,
                                exclude_difficult=False,
                                ret=False)

        val_dataset.parse_xml(images_dirs=[val_img_dir],
                              image_set_filenames=[val_image_set_filename],
                              annotations_dirs=[val_annotation_dir],
                              classes=classes,
                              include_classes='all',
                              exclude_truncated=False,
                              exclude_difficult=True,
                              ret=False)

    if config['training']['annotation_type'] == 'csv':
        train_dataset.parse_csv(images_dir=train_img_dir,
                                labels_filename=train_annotation_dir,
                                input_format=[
                                    'image_name', 'xmin', 'xmax', 'ymin',
                                    'ymax', 'class_id'
                                ],
                                include_classes='all')

        val_dataset.parse_csv(images_dir=val_img_dir,
                              labels_filename=val_annotation_dir,
                              input_format=[
                                  'image_name', 'xmin', 'xmax', 'ymin', 'ymax',
                                  'class_id'
                              ],
                              include_classes='all')
    end_data = timer()
    print(
        f"[INFO]...Time taken by Data loading/transformation Job is {(end_data - start_data)/60:.2f} min(s)"
    )
    return train_dataset, val_dataset
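
# A minimal example of the config structure data_generator_func expects, inferred
# from the keys it reads above; every path and value here is an illustrative
# assumption, not something defined by the original code.
example_config = {
    'training': {
        'train_load_images_into_memory': False,
        'validation_load_images_into_memory': False,
        'train_img_dir': 'data/train/images',
        'val_img_dir': 'data/val/images',
        'train_annotation_dir': 'data/train/annotations',
        'val_annotation_dir': 'data/val/annotations',
        'train_image_set_filename': 'data/train/train.txt',
        'val_image_set_filename': 'data/val/val.txt',
        'classes': ['background', 'car', 'bus', 'truck'],
        'annotation_type': 'xml',
    }
}
train_dataset, val_dataset = data_generator_func(example_config)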
Example #15
# The directories that contain the annotations.
annotations = "C:/Users/t_tor/Unsynced/complete_dataset/annotations/"

# The paths to the image sets.
imagenet_images = "C:/Users/t_tor/Unsynced/complete_dataset/imagenet_images.txt"
custom_images = "C:/Users/t_tor/Unsynced/complete_dataset/custom_images.txt"
coco_images = "C:/Users/t_tor/Unsynced/complete_dataset/coco_images.txt"
voc07test_images = "C:/Users/t_tor/Unsynced/complete_dataset/voc07_test.txt"

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background', 'boat']

images = "C:/Users/t_tor/Unsynced/complete_dataset/images/"  # assumed; `images` is used below but never defined in the original snippet

dataset.parse_xml(images_dirs=[images],
                  image_set_filenames=[custom_images],
                  annotations_dirs=[annotations],
                  classes=classes,
                  include_classes='all',
                  exclude_truncated=False,
                  exclude_difficult=True,
                  ret=False)

convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)

predictor_sizes = [
    model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
    model.get_layer('fc7_mbox_conf').output_shape[1:3],
    model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
    model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
    model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
    model.get_layer('conv9_2_mbox_conf').output_shape[1:3]
]
# The full Pascal VOC class list, kept for reference but overridden just below.
'''
classes = [
    'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
'''

classes = ['background', 'Gun', 'Knife', 'Razor', 'Shuriken']

train_dataset.parse_xml(
    images_dirs=[
        '/home/dlsaavedra/Desktop/Tesis/8.-Object_Detection/Experimento_3/Training/images'
    ],
    image_set_filenames=[
        "/home/dlsaavedra/Desktop/Tesis/8.-Object_Detection/Experimento_3/Training/train.txt"
    ],
    annotations_dirs=[
        "/home/dlsaavedra/Desktop/Tesis/8.-Object_Detection/Experimento_3/Training/anns"
    ],
    classes=classes,
    include_classes='all',
    exclude_truncated=False,
    exclude_difficult=False,
    ret=False)

val_dataset.parse_xml(
    images_dirs=[
        '/home/dlsaavedra/Desktop/Tesis/8.-Object_Detection/Experimento_3/Training/images'
    ],
    image_set_filenames=[
        "/home/dlsaavedra/Desktop/Tesis/8.-Object_Detection/Experimento_3/Training/train.txt"
    ],
Example #17
# The directories that contain the annotations.
fire_anno = args.annotations
fire_img = args.images  # assumed; `fire_img` is used below but was not defined in the original snippet

# The paths to the image sets.
fire_train = os.path.join(args.image_sets, 'train.txt')
fire_test = os.path.join(args.image_sets, 'test.txt')
fire_val = os.path.join(args.image_sets, 'val.txt')

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background', 'fire']  # uncommented: `classes` is required by the parse_xml calls below

train_dataset.parse_xml(images_dirs=[fire_img],
                        image_set_filenames=[fire_train],
                        annotations_dirs=[fire_anno],
                        classes=classes,
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False)
val_dataset.parse_xml(images_dirs=[fire_img],
                      image_set_filenames=[fire_val],
                      annotations_dirs=[fire_anno],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=False,
                      ret=False)

# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will
# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`
# option in the constructor, because in that case the images are in memory already anyway. If you don't
# train_dataset.parse_csv(images_dir=images_dir,
#                        labels_filename=train_labels_filename,
#                        input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.
#                        include_classes='all')

# val_dataset.parse_csv(images_dir=images_dir,
#                      labels_filename=val_labels_filename,
#                      input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],
#                      include_classes='all')

# [Ajinkya]: Using the XML parser instead

train_dataset.parse_xml(images_dirs=[training_images_dir],
                        image_set_filenames=[training_set_filename],
                        annotations_dirs=[training_annotation_dir],
                        classes=['background', 'cone'],
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False,
                        verbose=True)

val_dataset.parse_xml(images_dirs=[test_images_dir],
                      image_set_filenames=[test_set_filename],
                      annotations_dirs=[test_annotation_dir],
                      classes=['background', 'cone'],
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=False,
                      ret=False,
                      verbose=True)
def _main_(args):
    print('Hello World! This is {:s}'.format(args.desc))

    # config_path = args.conf
    # with open(config_path) as config_buffer:    
    #     config = json.loads(config_buffer.read())
    #############################################################
    #   Set model parameters
    #############################################################
    img_height          = 300  # Height of the model input images
    img_width           = 300  # Width of the model input images
    img_channels        = 3  # Number of color channels of the model input images
    mean_color          = [123, 117, 104]  # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
    swap_channels       = [2, 1, 0]  # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
    n_classes           = 20  # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
    scales_pascal       = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05]  # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
    scales_coco         = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]  # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
    scales              = scales_pascal
    aspect_ratios       = [[1.0, 2.0, 0.5],
                           [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                           [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                           [1.0, 2.0, 0.5, 3.0, 1.0 / 3.0],
                           [1.0, 2.0, 0.5],
                           [1.0, 2.0, 0.5]]  # The anchor box aspect ratios used in the original SSD300; the order matters
    two_boxes_for_ar1   = True
    steps               = [8, 16, 32, 64, 100, 300]  # The space between two adjacent anchor box center points for each predictor layer.
    offsets             = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]  # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
    clip_boxes          = False  # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
    variances           = [0.1, 0.1, 0.2, 0.2]  # The variances by which the encoded target coordinates are divided as in the original implementation
    normalize_coords    = True
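
    # Side note (illustrative, not from the original script): with two_boxes_for_ar1=True,
    # each cell of a predictor layer gets one anchor box per aspect ratio plus an extra
    # box for ratio 1.0, i.e. 4 or 6 boxes per cell for the SSD300 configuration above.
    n_boxes_per_cell = [len(ars) + (1 if two_boxes_for_ar1 and 1.0 in ars else 0)
                        for ars in aspect_ratios]
    print(n_boxes_per_cell)  # [4, 6, 6, 6, 4, 4]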

    #############################################################
    #   Create the model
    #############################################################
    # 1: Build the Keras model.
    model = ssd_300(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='training',
                    l2_regularization=0.0005,
                    scales=scales,
                    aspect_ratios_per_layer=aspect_ratios,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=mean_color,
                    swap_channels=swap_channels)
    # 2: Load some weights into the model.

    # 3: Instantiate an optimizer and the SSD loss function and compile the model.
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
    model.compile(optimizer=adam, loss=ssd_loss.compute_loss)

    #############################################################
    #   Prepare the data
    #############################################################
    # 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
    train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
    val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)

    # 2: Parse the image and label lists for the training and validation datasets. This can take a while.
    VOC_2007_images_dir = '/home/minhnc-lab/WORKSPACES/AI/data/VOC/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/JPEGImages'
    VOC_2007_annotations_dir = '/home/minhnc-lab/WORKSPACES/AI/data/VOC/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/Annotations'
    VOC_2007_train_image_set_filename = '/home/minhnc-lab/WORKSPACES/AI/data/VOC/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/ImageSets/Main/train.txt'
    VOC_2007_val_image_set_filename = '/home/minhnc-lab/WORKSPACES/AI/data/VOC/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/ImageSets/Main/val.txt'
    # VOC_2007_trainval_image_set_filename = '/home/minhnc-lab/WORKSPACES/AI/data/VOC/VOCtrainval_06-Nov-2007/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'
    # VOC_2007_test_image_set_filename = '/home/minhnc-lab/WORKSPACES/AI/data/VOC/VOCtest_06-Nov-2007/VOCdevkit/VOC2007/ImageSets/Main/test.txt'

    classes = ['background',
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat',
               'chair', 'cow', 'diningtable', 'dog',
               'horse', 'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor']

    train_dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
                            image_set_filenames=[VOC_2007_train_image_set_filename],
                            annotations_dirs=[VOC_2007_annotations_dir],
                            classes=classes,
                            include_classes='all',
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)
    val_dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
                          image_set_filenames=[VOC_2007_val_image_set_filename],
                          annotations_dirs=[VOC_2007_annotations_dir],
                          classes=classes,
                          include_classes='all',
                          exclude_truncated=False,
                          exclude_difficult=True,
                          ret=False)

    train_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07+12_trainval.h5',
                                      resize=False,
                                      variable_image_size=True,
                                      verbose=True)

    val_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07_test.h5',
                                    resize=False,
                                    variable_image_size=True,
                                    verbose=True)
    # 3: Set the batch size.
    batch_size = 8  # Change the batch size if you like, or if you run into GPU memory issues.

    # 4: Set the image transformations for pre-processing and data augmentation options.
    ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
                                                img_width=img_width,
                                                background=mean_color)
    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=img_height, width=img_width)

    # 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
    predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
                       model.get_layer('fc7_mbox_conf').output_shape[1:3],
                       model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
                       model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]

    ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                        img_width=img_width,
                                        n_classes=n_classes,
                                        predictor_sizes=predictor_sizes,
                                        scales=scales,
                                        aspect_ratios_per_layer=aspect_ratios,
                                        two_boxes_for_ar1=two_boxes_for_ar1,
                                        steps=steps,
                                        offsets=offsets,
                                        clip_boxes=clip_boxes,
                                        variances=variances,
                                        matching_type='multi',
                                        pos_iou_threshold=0.5,
                                        neg_iou_limit=0.5,
                                        normalize_coords=normalize_coords)

    # 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
    train_generator = train_dataset.generate(batch_size=batch_size,
                                             shuffle=True,
                                             transformations=[ssd_data_augmentation],
                                             label_encoder=ssd_input_encoder,
                                             returns={'processed_images',
                                                      'encoded_labels'},
                                             keep_images_without_gt=False)

    val_generator = val_dataset.generate(batch_size=batch_size,
                                         shuffle=False,
                                         transformations=[convert_to_3_channels,
                                                          resize],
                                         label_encoder=ssd_input_encoder,
                                         returns={'processed_images',
                                                  'encoded_labels'},
                                         keep_images_without_gt=False)

    # Get the number of samples in the training and validations datasets.
    train_dataset_size = train_dataset.get_dataset_size()
    val_dataset_size = val_dataset.get_dataset_size()

    print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
    print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))

    #############################################################
    #   Kick off the training
    #############################################################
    # Define model callbacks.
    model_checkpoint = ModelCheckpoint(
        filepath='ssd300_pascal_07+12_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)

    csv_logger = CSVLogger(filename='ssd300_pascal_07+12_training_log.csv',
                           separator=',',
                           append=True)

    learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                    verbose=1)

    terminate_on_nan = TerminateOnNaN()

    callbacks = [model_checkpoint,
                 csv_logger,
                 learning_rate_scheduler,
                 terminate_on_nan]

    # Train
    initial_epoch = 0
    final_epoch = 120
    steps_per_epoch = 1000

    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=final_epoch,
                                  callbacks=callbacks,
                                  validation_data=val_generator,
                                  validation_steps=ceil(val_dataset_size / batch_size),
                                  initial_epoch=initial_epoch)

    #############################################################
    #   Run the evaluation
    #############################################################
    # 1: Set the generator for the predictions.
    predict_generator = val_dataset.generate(batch_size=1,
                                             shuffle=True,
                                             transformations=[convert_to_3_channels,
                                                              resize],
                                             label_encoder=None,
                                             returns={'processed_images',
                                                      'filenames',
                                                      'inverse_transform',
                                                      'original_images',
                                                      'original_labels'},
                                             keep_images_without_gt=False)

    # 2: Generate samples.
    batch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(
        predict_generator)

    i = 0  # Which batch item to look at

    print("Image:", batch_filenames[i])
    print()
    print("Ground truth boxes:\n")
    print(np.array(batch_original_labels[i]))

    # 3: Make predictions.
    y_pred = model.predict(batch_images)

    # 4: Decode the raw predictions in `y_pred`.
    y_pred_decoded = decode_detections(y_pred,
                                       confidence_thresh=0.5,
                                       iou_threshold=0.4,
                                       top_k=200,
                                       normalize_coords=normalize_coords,
                                       img_height=img_height,
                                       img_width=img_width)

    # 5: Convert the predictions for the original image.
    y_pred_decoded_inv = apply_inverse_transforms(y_pred_decoded, batch_inverse_transforms)
    np.set_printoptions(precision=2, suppress=True, linewidth=90)
    print("Predicted boxes:\n")
    print('   class   conf xmin   ymin   xmax   ymax')
    print(y_pred_decoded_inv[i])

    # 6: Draw the predicted boxes onto the image
    # Set the colors for the bounding boxes
    colors = plt.cm.hsv(np.linspace(0, 1, n_classes + 1)).tolist()
    classes = ['background',
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat',
               'chair', 'cow', 'diningtable', 'dog',
               'horse', 'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor']

    plt.figure(figsize=(20, 12))
    plt.imshow(batch_original_images[i])

    current_axis = plt.gca()

    for box in batch_original_labels[i]:
        xmin = box[1]
        ymin = box[2]
        xmax = box[3]
        ymax = box[4]
        label = '{}'.format(classes[int(box[0])])
        current_axis.add_patch(
            plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color='green', fill=False, linewidth=2))
        current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor': 'green', 'alpha': 1.0})

    for box in y_pred_decoded_inv[i]:
        xmin = box[2]
        ymin = box[3]
        xmax = box[4]
        ymax = box[5]
        color = colors[int(box[0])]
        label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
        current_axis.add_patch(
            plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color=color, fill=False, linewidth=2))
        current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor': color, 'alpha': 1.0})
def _main_(args):

    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    path_imgs_test = config['test']['test_image_folder']
    path_anns_test = config['test']['test_annot_folder']
    labels = config['model']['labels']
    categories = {}
    #categories = {"Razor": 1, "Gun": 2, "Knife": 3, "Shuriken": 4}  # category 0 is the background
    for i in range(len(labels)):
        categories[labels[i]] = i + 1
    print('\nTraining on: \t' + str(categories) + '\n')
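
    # Equivalent one-liner (illustrative): class names map to 1-based IDs because
    # ID 0 is reserved for the background class.
    # categories = {label: i + 1 for i, label in enumerate(labels)}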

    img_height = config['model']['input']  # Height of the model input images
    img_width = config['model']['input']  # Width of the model input images
    img_channels = 3  # Number of color channels of the model input images
    n_classes = len(
        labels
    )  # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
    classes = ['background'] + labels

    model_mode = 'training'
    # TODO: Set the path to the `.h5` file of the model to be loaded.
    model_path = config['train']['saved_weights_name']

    # We need to create an SSDLoss object in order to pass that to the model loader.
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

    K.clear_session()  # Clear previous models from memory.

    model = load_model(model_path,
                       custom_objects={
                           'AnchorBoxes': AnchorBoxes,
                           'L2Normalization': L2Normalization,
                           'DecodeDetections': DecodeDetections,
                           'compute_loss': ssd_loss.compute_loss
                       })

    test_dataset = DataGenerator()
    test_dataset.parse_xml(
        images_dirs=[config['test']['test_image_folder']],
        image_set_filenames=[config['test']['test_image_set_filename']],
        annotations_dirs=[config['test']['test_annot_folder']],
        classes=classes,
        include_classes='all',
        exclude_truncated=False,
        exclude_difficult=False,
        ret=False)
    evaluator = Evaluator(model=model,
                          n_classes=n_classes,
                          data_generator=test_dataset,
                          model_mode=model_mode)

    results = evaluator(img_height=img_height,
                        img_width=img_width,
                        batch_size=4,
                        data_generator_mode='resize',
                        round_confidences=False,
                        matching_iou_threshold=0.5,
                        border_pixels='include',
                        sorting_algorithm='quicksort',
                        average_precision_mode='sample',
                        num_recall_points=11,
                        ignore_neutral_boxes=True,
                        return_precisions=True,
                        return_recalls=True,
                        return_average_precisions=True,
                        verbose=True)

    mean_average_precision, average_precisions, precisions, recalls = results

    total_instances = []
    class_aps = []  # renamed from `precisions` to avoid shadowing the precisions array returned by the evaluator
    for i in range(1, len(average_precisions)):
        print('{:.0f} instances of class'.format(len(recalls[i])), classes[i],
              'with average precision: {:.4f}'.format(average_precisions[i]))
        total_instances.append(len(recalls[i]))
        class_aps.append(average_precisions[i])

    if sum(total_instances) == 0:
        print('No test instances found.')
        return

    print('mAP using the weighted average of precisions among classes: {:.4f}'.
          format(
              sum([a * b for a, b in zip(total_instances, class_aps)]) /
              sum(total_instances)))
    print('mAP: {:.4f}'.format(
        sum(class_aps) / sum(x > 0 for x in total_instances)))
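
    # Toy numbers (hypothetical) to illustrate the two summaries above: with per-class
    # APs [0.80, 0.60] and instance counts [30, 10], the weighted mAP is
    # (0.80*30 + 0.60*10) / 40 = 0.75, while the plain mean is (0.80 + 0.60) / 2 = 0.70.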

    for i in range(1, len(average_precisions)):
        print("{:<14}{:<6}{}".format(classes[i], 'AP',
                                     round(average_precisions[i], 3)))
    print()
    print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))
# VOC_2007_test_image_set_filename     = '/home/ogai1234/datasets/VOCdevkit/VOC2007/ImageSets/Main/test.txt'

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat',
           'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor']

train_dataset.parse_xml(images_dirs=[VOC_2007_images_dir,
                                     VOC_2012_images_dir],
                        image_set_filenames=[VOC_2007_trainval_image_set_filename,
                                             VOC_2012_trainval_image_set_filename],
                        annotations_dirs=[VOC_2007_annotations_dir,
                                          VOC_2012_annotations_dir],
                        classes=classes,
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False)

val_dataset.parse_xml(images_dirs=[VOC_2007_images_dir],
                      image_set_filenames=[VOC_2007_val_image_set_filename],
                      annotations_dirs=[VOC_2007_annotations_dir],
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=True,
                      ret=False)
Example #22
def main():

    # Set a few configuration parameters.
    img_height = 300
    img_width = 300
    n_classes = 20
    model_mode = 'training'

    # Set the path to the `.h5` file of the model to be loaded.
    model_file = file_io.FileIO('gs://deeplearningteam11/vgg19BNmodel.h5',
                                mode='rb')

    # Store model locally on instance
    model_path = 'model.h5'
    with open(model_path, 'wb') as f:
        f.write(model_file.read())
    model_file.close()

    data_dir = "gs://deeplearningteam11/data"
    os.system("gsutil -m cp -r " + data_dir + "  " +
              os.path.dirname(__file__) + " > /dev/null 2>&1 ")
    # We need to create an SSDLoss object in order to pass that to the model loader.
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

    K.clear_session()  # Clear previous models from memory.

    model = load_model(model_path,
                       custom_objects={
                           'AnchorBoxes': AnchorBoxes,
                           'L2Normalization': L2Normalization,
                           'DecodeDetections': DecodeDetections,
                           'compute_loss': ssd_loss.compute_loss
                       })

    model.summary()

    te_dataset = DataGenerator(load_images_into_memory=True)
    tr_dataset = DataGenerator(load_images_into_memory=True)

    # TODO: Set the paths to the dataset here.
    tr_Pascal_VOC_dataset_images_dir = os.path.dirname(
        __file__) + "/" + "data/data/VOC2007/train/JPEGImages/"
    tr_Pascal_VOC_dataset_annotations_dir = os.path.dirname(
        __file__) + "/" + "data/data/VOC2007/train/Annotations/"
    tr_Pascal_VOC_dataset_image_set_filename = os.path.dirname(
        __file__) + "/" + "data/data/VOC2007/train/ImageSets/Main/trainval.txt"

    te_Pascal_VOC_dataset_images_dir = os.path.dirname(
        __file__) + "/" + "data/data/VOC2007/test/JPEGImages/"
    te_Pascal_VOC_dataset_annotations_dir = os.path.dirname(
        __file__) + "/" + "data/data/VOC2007/test/Annotations/"
    te_Pascal_VOC_dataset_image_set_filename = os.path.dirname(
        __file__) + "/" + "data/data/VOC2007/test/ImageSets/Main/test.txt"

    # The XML parser needs to know what object class names to look for and in which order to map them to integers.
    classes = [
        'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
        'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
        'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
        'tvmonitor'
    ]

    with tf.device('/device:GPU:0'):
        # Testing results
        te_dataset.parse_xml(
            images_dirs=[te_Pascal_VOC_dataset_images_dir],
            image_set_filenames=[te_Pascal_VOC_dataset_image_set_filename],
            annotations_dirs=[te_Pascal_VOC_dataset_annotations_dir],
            classes=classes,
            include_classes='all',
            exclude_truncated=False,
            exclude_difficult=True,
            ret=False,
            verbose=False)

        te_evaluator = Evaluator(model=model,
                                 n_classes=n_classes,
                                 data_generator=te_dataset,
                                 model_mode=model_mode)

        te_results = te_evaluator(img_height=img_height,
                                  img_width=img_width,
                                  batch_size=64,
                                  data_generator_mode='resize',
                                  round_confidences=False,
                                  matching_iou_threshold=0.5,
                                  border_pixels='include',
                                  sorting_algorithm='quicksort',
                                  average_precision_mode='sample',
                                  num_recall_points=11,
                                  ignore_neutral_boxes=True,
                                  return_precisions=True,
                                  return_recalls=True,
                                  return_average_precisions=True,
                                  verbose=False)

        mean_average_precision, average_precisions, precisions, recalls = te_results

        for i in range(1, len(average_precisions)):
            print("{:<14}{:<6}{}".format(classes[i], 'AP',
                                         round(average_precisions[i], 3)))
        print()
        print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision,
                                                      3)))

        print('TRAIN')
        tr_dataset.parse_xml(
            images_dirs=[tr_Pascal_VOC_dataset_images_dir],
            image_set_filenames=[tr_Pascal_VOC_dataset_image_set_filename],
            annotations_dirs=[tr_Pascal_VOC_dataset_annotations_dir],
            classes=classes,
            include_classes='all',
            exclude_truncated=False,
            exclude_difficult=True,
            ret=False,
            verbose=False)

        # Training results
        tr_evaluator = Evaluator(model=model,
                                 n_classes=n_classes,
                                 data_generator=tr_dataset,
                                 model_mode=model_mode)

        tr_results = tr_evaluator(img_height=img_height,
                                  img_width=img_width,
                                  batch_size=64,
                                  data_generator_mode='resize',
                                  round_confidences=False,
                                  matching_iou_threshold=0.5,
                                  border_pixels='include',
                                  sorting_algorithm='quicksort',
                                  average_precision_mode='sample',
                                  num_recall_points=11,
                                  ignore_neutral_boxes=True,
                                  return_precisions=True,
                                  return_recalls=True,
                                  return_average_precisions=True,
                                  verbose=False)

        mean_average_precision, average_precisions, precisions, recalls = tr_results

        for i in range(1, len(average_precisions)):
            print("{:<14}{:<6}{}".format(classes[i], 'AP',
                                         round(average_precisions[i], 3)))
        print()
        print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision,
                                                      3)))
Example #23
#  Setup Data Generator
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.

# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.

train_dataset = DataGenerator(load_images_into_memory=False,
                              hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False,
                            hdf5_dataset_path=None)
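
# If enough RAM is available, the alternative mentioned above would look like this
# (a sketch, not part of the original snippet):
# train_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
# val_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)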

# 2: Parse the image and label lists for the training and validation datasets. This can take a while.

train_dataset.parse_xml(images_dirs=train_img_paths,
                        image_set_filenames=train_img_set_paths,
                        annotations_dirs=train_annot_paths,
                        classes=classes,
                        include_classes='all',
                        exclude_truncated=False,
                        exclude_difficult=False,
                        ret=False)

val_dataset.parse_xml(images_dirs=valid_img_paths,
                      image_set_filenames=valid_img_set_paths,
                      annotations_dirs=valid_annot_paths,
                      classes=classes,
                      include_classes='all',
                      exclude_truncated=False,
                      exclude_difficult=True,
                      ret=False)

# 3: Set the image transformations for pre-processing and data augmentation options.
Example #24
def main(job_dir, **args):
    # Set up the path for saving logs
    logs_dir = job_dir + 'logs/'
    data_dir = "gs://deeplearningteam11/data"

    print("Current Directory: " + os.path.dirname(__file__))
    print("Lets copy the data to: " + os.path.dirname(__file__))
    os.system("gsutil -m cp -r " + data_dir + "  " +
              os.path.dirname(__file__) + " > /dev/null 2>&1 ")
    #exit(0)

    with tf.device('/device:GPU:0'):
        # 1: Build the Keras model.
        K.clear_session()  # Clear previous models from memory.
        model = ssd_300(image_size=(img_height, img_width, img_channels),
                        n_classes=n_classes,
                        mode='training',
                        l2_regularization=0.0005,
                        scales=scales,
                        aspect_ratios_per_layer=aspect_ratios,
                        two_boxes_for_ar1=two_boxes_for_ar1,
                        steps=steps,
                        offsets=offsets,
                        clip_boxes=clip_boxes,
                        variances=variances,
                        normalize_coords=normalize_coords,
                        subtract_mean=mean_color,
                        swap_channels=swap_channels)

        # Set the path to the `.h5` file of the model to be loaded.
        model_file = file_io.FileIO('gs://deeplearningteam11/vgg19BNmodel.h5',
                                    mode='rb')

        # Store model locally on instance
        model_path = 'model.h5'
        with open(model_path, 'wb') as f:
            f.write(model_file.read())
        model_file.close()

        ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)

        model = load_model(model_path,
                           custom_objects={
                               'AnchorBoxes': AnchorBoxes,
                               'L2Normalization': L2Normalization,
                               'DecodeDetections': DecodeDetections,
                               'compute_loss': ssd_loss.compute_loss
                           })

        for layer in model.layers:
            layer.trainable = True

        model.summary()

        # 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
        train_dataset = DataGenerator(load_images_into_memory=True,
                                      hdf5_dataset_path=None)
        val_dataset = DataGenerator(load_images_into_memory=True,
                                    hdf5_dataset_path=None)

        # 2: Parse the image and label lists for the training and validation datasets. This can take a while.
        #  VOC 2007
        #  The directories that contain the images.
        VOC_2007_train_images_dir = 'data/data/VOC2007/train/JPEGImages/'
        VOC_2007_test_images_dir = 'data/data/VOC2007/test/JPEGImages/'

        VOC_2007_train_anns_dir = 'data/data/VOC2007/train/Annotations/'
        VOC_2007_test_anns_dir = 'data/data/VOC2007/test/Annotations/'

        # The paths to the image sets.
        VOC_2007_trainval_image_set_dir = 'data/data/VOC2007/train/ImageSets/Main/'
        VOC_2007_test_image_set_dir = 'data/data/VOC2007/test/ImageSets/Main/'

        VOC_2007_train_images_dir = os.path.dirname(
            __file__) + "/" + VOC_2007_train_images_dir
        VOC_2007_test_images_dir = os.path.dirname(
            __file__) + "/" + VOC_2007_test_images_dir

        VOC_2007_train_anns_dir = os.path.dirname(
            __file__) + "/" + VOC_2007_train_anns_dir
        VOC_2007_test_anns_dir = os.path.dirname(
            __file__) + "/" + VOC_2007_test_anns_dir

        VOC_2007_trainval_image_set_dir = os.path.dirname(
            __file__) + "/" + VOC_2007_trainval_image_set_dir
        VOC_2007_test_image_set_dir = os.path.dirname(
            __file__) + "/" + VOC_2007_test_image_set_dir

        VOC_2007_trainval_image_set_filename = VOC_2007_trainval_image_set_dir + '/trainval.txt'
        VOC_2007_test_image_set_filename = VOC_2007_test_image_set_dir + '/test.txt'

        # The XML parser needs to know what object class names to look for and in which order to map them to integers.
        classes = [
            'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
            'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
            'tvmonitor'
        ]

        print("Parsing Training Data ...")
        train_dataset.parse_xml(
            images_dirs=[VOC_2007_train_images_dir],
            image_set_filenames=[VOC_2007_trainval_image_set_filename],
            annotations_dirs=[VOC_2007_train_anns_dir],
            classes=classes,
            include_classes='all',
            exclude_truncated=False,
            exclude_difficult=False,
            ret=False,
            verbose=False)
        print("Done")
        print(
            "================================================================")

        print("Parsing Test Data ...")
        val_dataset.parse_xml(
            images_dirs=[VOC_2007_test_images_dir],
            image_set_filenames=[VOC_2007_test_image_set_filename],
            annotations_dirs=[VOC_2007_test_anns_dir],
            classes=classes,
            include_classes='all',
            exclude_truncated=False,
            exclude_difficult=True,
            ret=False,
            verbose=False)
        print("Done")
        print(
            "================================================================")

        # 3: Set the batch size.
        batch_size = 32  # Change the batch size if you like, or if you run into GPU memory issues.

        #  4: Set the image transformations for pre-processing and data augmentation options.

        # For the training generator:
        ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
                                                    img_width=img_width,
                                                    background=mean_color)

        # For the validation generator:
        convert_to_3_channels = ConvertTo3Channels()
        resize = Resize(height=img_height, width=img_width)

        # 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.

        # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
        predictor_sizes = [
            model.get_layer('conv4_4_norm_mbox_conf').output_shape[1:3],
            model.get_layer('fc7_mbox_conf').output_shape[1:3],
            model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv9_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv10_2_mbox_conf').output_shape[1:3],
            model.get_layer('conv11_2_mbox_conf').output_shape[1:3]
        ]

        ssd_input_encoder = SSDInputEncoder(
            img_height=img_height,
            img_width=img_width,
            n_classes=n_classes,
            predictor_sizes=predictor_sizes,
            scales=scales,
            aspect_ratios_per_layer=aspect_ratios,
            two_boxes_for_ar1=two_boxes_for_ar1,
            steps=steps,
            offsets=offsets,
            clip_boxes=clip_boxes,
            variances=variances,
            matching_type='multi',
            pos_iou_threshold=0.5,
            neg_iou_limit=0.5,
            normalize_coords=normalize_coords)

        # 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.

        train_generator = train_dataset.generate(
            batch_size=batch_size,
            shuffle=True,
            transformations=[ssd_data_augmentation],
            label_encoder=ssd_input_encoder,
            returns={'processed_images', 'encoded_labels'},
            keep_images_without_gt=False)

        val_generator = val_dataset.generate(
            batch_size=batch_size,
            shuffle=False,
            transformations=[convert_to_3_channels, resize],
            label_encoder=ssd_input_encoder,
            returns={'processed_images', 'encoded_labels'},
            keep_images_without_gt=False)

        # Get the number of samples in the training and validation datasets.
        train_dataset_size = train_dataset.get_dataset_size()
        val_dataset_size = val_dataset.get_dataset_size()

        print("Number of images in the training dataset:\t{:>6}".format(
            train_dataset_size))
        print("Number of images in the validation dataset:\t{:>6}".format(
            val_dataset_size))

        # Define a learning rate schedule.

        def lr_schedule(epoch):
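            # Constant low learning rate: this snippet resumes training of an
            # already-trained model at epoch 120, so the staged schedule is
            # left commented out below for reference.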
            return 1e-6
            # if epoch < 80:
            #     return 0.001
            # elif epoch < 100:
            #     return 0.0001
            # else:
            #     return 0.00001

        learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,
                                                        verbose=1)

        terminate_on_nan = TerminateOnNaN()

        callbacks = [learning_rate_scheduler, terminate_on_nan]

        # If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
        initial_epoch = 120
        final_epoch = 200
        steps_per_epoch = 500
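        # Note: with `initial_epoch=120` and `epochs=final_epoch=200`, this run
        # trains for 80 more epochs. A fixed `steps_per_epoch` of 500 does not
        # necessarily cover the whole training set once per epoch;
        # `ceil(train_dataset_size / batch_size)` would.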

        history = model.fit_generator(generator=train_generator,
                                      steps_per_epoch=steps_per_epoch,
                                      epochs=final_epoch,
                                      callbacks=callbacks,
                                      validation_data=val_generator,
                                      validation_steps=ceil(val_dataset_size /
                                                            batch_size),
                                      initial_epoch=initial_epoch)

        model_name = "vgg19BNmodel_cont.h5"
        model.save(model_name)
        with file_io.FileIO(model_name, mode='rb') as input_f:
            with file_io.FileIO("gs://deeplearningteam11/" + model_name,
                                mode='w+') as output_f:
                output_f.write(input_f.read())
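        # Note: `file_io` is assumed to be `tensorflow.python.lib.io.file_io`;
        # on recent TensorFlow versions the same copy can be written as
        # `tf.io.gfile.copy(model_name, "gs://deeplearningteam11/" + model_name)`.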
Example #25
0
if Dataset_Build == 'New_Dataset':
    # 1: Instantiate two `DataGenerator` objects: One for training, one for validation.

    # Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.

    train_dataset = DataGenerator(dataset='train', load_images_into_memory=False, hdf5_dataset_path=None)
    val_dataset = DataGenerator(dataset='val', load_images_into_memory=False, hdf5_dataset_path=None)

    # 2: Parse the image and label lists for the training and validation datasets. This can take a while.
    # images_dirs, image_set_filenames, and annotations_dirs should have the same length
    train_dataset.parse_xml(images_dirs=[train_source_images_dir],
                            target_images_dirs=[train_target_images_dir],
                            image_set_filenames=[train_source_image_set_filename],
                            target_image_set_filenames=[train_target_image_set_filename],
                            annotations_dirs=[train_annotation_dir],
                            classes=train_classes,
                            include_classes=train_include_classes,
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)
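    # Since the argument lists are parallel, several datasets can be parsed in
    # one call by growing every list together (hypothetical paths):
    # train_dataset.parse_xml(images_dirs=[dir_a, dir_b],
    #                         image_set_filenames=[set_a, set_b],
    #                         annotations_dirs=[anns_a, anns_b], ...)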

    val_dataset.parse_xml(images_dirs=[test_target_images_dir],
                          image_set_filenames=[test_target_image_set_filename],
                          annotations_dirs=[test_annotation_dir],
                          classes=val_classes,
                          include_classes=val_include_classes,
                          exclude_truncated=False,
                          exclude_difficult=True,
                          ret=False)

    # Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will speed up the training.
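    # In the upstream ssd_keras `DataGenerator` this conversion is a one-liner;
    # a sketch with an assumed output path:
    # train_dataset.create_hdf5_dataset(file_path='dataset_train.h5',
    #                                   resize=False,
    #                                   variable_image_size=True,
    #                                   verbose=True)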
Example #26
0
classes = ['background', 'borowka_ok']

anotations_borowki_ok_test = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_ok_test'
anotations_borowki_nok_test = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_nok_test'
anotations_borowki_ok_train = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_ok'
anotations_borowki_nok_train = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_nok'

set_borowki_ok_test = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_ok_test/borowka_ok_test.txt'
set_borowki_nok_test = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_nok_test/borowka_nok_test.txt'
set_borowki_ok_train = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_ok/borowka_ok_train.txt'
set_borowki_nok_train = 'datasets/Oznaczone_borowki_rgb/borowka_rgb_nok/borowka_nok_train.txt'

dataset.parse_xml(images_dirs=[anotations_borowki_ok_test],
                  image_set_filenames=[set_borowki_ok_test],
                  annotations_dirs=[anotations_borowki_ok_test],
                  classes=classes,
                  include_classes='all',
                  exclude_truncated=False,
                  exclude_difficult=True,
                  ret=False)

evaluator = Evaluator(model=model,
                      n_classes=n_classes,
                      data_generator=dataset,
                      model_mode=model_mode)
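# A note on the call below: the evaluator runs the model over `dataset`,
# matches detections to the ground truth at the given IoU threshold, and
# computes per-class average precisions. If the (optional) flags
# `return_precisions=True`, `return_recalls=True` and
# `return_average_precisions=True` were also passed, `results` would unpack as
# mean_average_precision, average_precisions, precisions, recalls = results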

results = evaluator(img_height=img_height,
                    img_width=img_width,
                    batch_size=8,
                    data_generator_mode='resize',
                    round_confidences=False,
                    matching_iou_threshold=0.5)
Example #27
0
    # The paths to the image sets.
    train_image_set_filename = '/home/docker/Jessi/smart-traffic-sensor-lab/train.txt'
    test_image_set_filename = '/home/docker/Jessi/smart-traffic-sensor-lab/test.txt'

    classes = [
        'None', 'motorcycle', 'car', 'van', 'bus', 'truck', 'small-truck',
        'tank-truck'
    ]

    batch_size = 8

    train_dataset.parse_xml(images_dirs=[images_dir],
                            image_set_filenames=[train_image_set_filename],
                            annotations_dirs=[annotations_dir],
                            classes=classes,
                            include_classes='all',
                            exclude_truncated=False,
                            exclude_difficult=False,
                            ret=False)
    # 6: Create the validation set batch generator (if you want to use a validation dataset)

    val_dataset.parse_xml(images_dirs=[images_dir],
                          image_set_filenames=[test_image_set_filename],
                          annotations_dirs=[annotations_dir],
                          classes=classes,
                          include_classes='all',
                          exclude_truncated=False,
                          exclude_difficult=False,
                          ret=False)

    # Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will speed up the training.
Example #28
0
	def load_VOC_IMG_generators(self,model):
		print('Making VOC image generators')
		datadir = self.datas['DATA_PATH']
		train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
		val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
		test_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
		images_dir = os.path.join(datadir, 'Images')
		annotations_dir = os.path.join(datadir, 'Annotations')
		train_image_set_filename = os.path.join(datadir, 'ImageSets', 'train.txt')
		val_image_set_filename = os.path.join(datadir, 'ImageSets', 'val.txt')
		test_image_set_filename = os.path.join(datadir, 'ImageSets', 'test.txt')
		generator_options = self.datas['GENERATOR']

		train_dataset.parse_xml(images_dirs=[images_dir],
		                        image_set_filenames=[train_image_set_filename],
		                        annotations_dirs=[annotations_dir],
		                        classes=self.datas['CLASSES'],
		                        include_classes='all',
		                        exclude_truncated=False,
		                        exclude_difficult=False,
		                        ret=False)
		val_dataset.parse_xml(images_dirs=[images_dir],
		                      image_set_filenames=[val_image_set_filename],
		                      annotations_dirs=[annotations_dir],
		                      classes=self.datas['CLASSES'],
		                      include_classes='all',
		                      exclude_truncated=False,
		                      exclude_difficult=False,
		                      ret=False)
		test_dataset.parse_xml(images_dirs=[images_dir],
		                       image_set_filenames=[test_image_set_filename],
		                       annotations_dirs=[annotations_dir],
		                       classes=self.datas['CLASSES'],
		                       include_classes='all',
		                       exclude_truncated=False,
		                       exclude_difficult=False,
		                       ret=False)

		convert_to_3_channels = ConvertTo3Channels()
		target_size = generator_options['TARGET_SIZE']
		resize = Resize(height=target_size[0], width=target_size[1])

		predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],
		                   model.get_layer('fc7_mbox_conf').output_shape[1:3],
		                   model.get_layer('conv6_2_mbox_conf').output_shape[1:3],
		                   model.get_layer('conv7_2_mbox_conf').output_shape[1:3],
		                   model.get_layer('conv8_2_mbox_conf').output_shape[1:3],
		                   model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]
		scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
		scales = scales_pascal
		aspect_ratios = [[1.0, 2.0, 0.5],
		                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
		                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
		                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
		                 [1.0, 2.0, 0.5],
		                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters
		steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.
		two_boxes_for_ar1 = True
		mean_color = [123, 117, 104] # TODO: add this as a parameter
		offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
		clip_boxes = False
		variances = [0.1, 0.1, 0.2, 0.2]
		normalize_coords = True
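		# With `two_boxes_for_ar1=True`, each predictor layer generates
		# len(aspect_ratios) + 1 anchor boxes per cell: 4 for the
		# [1.0, 2.0, 0.5] layers and 6 for the layers that also include
		# 3.0 and 1/3.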

		ssd_input_encoder = SSDInputEncoder(img_height=target_size[0],
		                                    img_width=target_size[1],
		                                    n_classes=20, # TODO: handle subsampling
		                                    predictor_sizes=predictor_sizes,
		                                    scales=scales,
		                                    aspect_ratios_per_layer=aspect_ratios,
		                                    two_boxes_for_ar1=two_boxes_for_ar1,
		                                    steps=steps,
		                                    offsets=offsets,
		                                    clip_boxes=clip_boxes,
		                                    variances=variances,
		                                    matching_type='multi',
		                                    pos_iou_threshold=0.5,
		                                    neg_iou_limit=0.5,
		                                    normalize_coords=normalize_coords)
		train_generator = train_dataset.generate(batch_size=generator_options['BATCH_SIZE'],
												shuffle=True,
												transformations=[convert_to_3_channels,
																resize],
												label_encoder=ssd_input_encoder,
												returns={'processed_images',
														 'encoded_labels'},
												keep_images_without_gt=False)

		val_generator = val_dataset.generate(batch_size=generator_options['BATCH_SIZE'],
												shuffle=True,
												transformations=[convert_to_3_channels,
																resize],
												label_encoder=ssd_input_encoder,
												returns={'processed_images',
														 'encoded_labels'},
												keep_images_without_gt=False)

		test_generator = test_dataset.generate(batch_size=generator_options['BATCH_SIZE'],
												shuffle=True,
												transformations=[convert_to_3_channels,
																resize],
												label_encoder=ssd_input_encoder,
												returns={'processed_images',
														 'encoded_labels'},
												keep_images_without_gt=False)
		return [train_generator, train_dataset.get_dataset_size()], [val_generator, val_dataset.get_dataset_size()], [test_generator, test_dataset.get_dataset_size()]
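	# A usage sketch (assumes `loader` is an instance of the surrounding class
	# and `model` is a built SSD model); each returned pair is
	# [generator, dataset_size]:
	# (train_gen, n_train), (val_gen, n_val), (test_gen, n_test) = \
	#     loader.load_VOC_IMG_generators(model)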