def detectAlphabets(imageToRecognize):
    """Run the RetinaNet text snapshot over one image and collect detections.

    Args:
        imageToRecognize (str): Path to the image file to run detection on.

    Returns:
        tuple: ``(recognized, draw)`` where ``recognized`` maps each detected
        caption (class name, with "equal" rewritten to "=") to its integer
        ``[x1, y1, x2, y2]`` box, and ``draw`` is the RGB image with boxes and
        captions rendered on it.

    NOTE(review): depends on module-level names (`parse_args`, `dir`,
    `custom_objects`, `get_session`, `CSVGenerator`, `custom` image helpers) —
    confirm they are defined elsewhere in this module. `dir` shadows the
    builtin and is presumably a path constant.
    """
    args = parse_args()
    args.val_path = imageToRecognize
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    keras.backend.tensorflow_backend.set_session(get_session())
    model = keras.models.load_model(os.path.join(dir, '../snapshots/resnet50_csv_wtext.h5'), custom_objects=custom_objects)
    test_image_data_generator = keras.preprocessing.image.ImageDataGenerator()
    #
    # # create a generator for testing data; only used here for label_to_name()
    test_generator = CSVGenerator(
        csv_data_file=args.annotations,
        csv_class_file=args.classes,
        image_data_generator=test_image_data_generator,
        batch_size=args.batch_size
    )
    # index = 0
    # load image
    image = read_image_bgr(args.val_path)

    # copy to draw on (matplotlib expects RGB, OpenCV loads BGR)
    draw = image.copy()
    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

    # preprocess image for network
    image = preprocess_image(image)
    image, scale = resize_image(image)

    # process image
    start = time.time()
    _, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))
    print("processing time: ", time.time() - start)
    print('detections:', detections)
    # compute predicted labels and scores
    # detection row layout assumed: [x1, y1, x2, y2, score_class0, ...] — TODO confirm
    predicted_labels = np.argmax(detections[0, :, 4:], axis=1)
    scores = detections[0, np.arange(detections.shape[1]), 4 + predicted_labels]
    print("label=", predicted_labels)
    # correct for image scale (boxes were predicted on the resized image)
    scaled_detection = detections[0, :, :4] / scale

    # visualize detections
    recognized = {}

    plt.figure(figsize=(15, 15))
    plt.axis('off')
    for idx, (label, score) in enumerate(zip(predicted_labels, scores)):
        # confidence threshold: discard weak detections
        if score < 0.35:
            continue
        b = scaled_detection[idx, :4].astype(int)
        cv2.rectangle(draw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 1)

        caption = test_generator.label_to_name(label)
        if caption == "equal":
            caption = "="
        cv2.putText(draw, caption, (b[0], b[1] - 1), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
        # NOTE(review): dict keyed by caption — later detections of the same
        # character overwrite earlier ones; confirm this is intended.
        recognized[caption] = b
        print(caption + ", score=" + str(score))

    plt.imshow(draw)
    plt.show()
    return recognized, draw
# Example #2
def create_generators(p):
    """Build the training and optional validation CSV generators.

    Args:
        p (dict): configuration with keys 'annotations', 'classes',
            'batch-size' and 'val-annotations'.

    Returns:
        tuple: (train_generator, validation_generator); the second element is
        None when no validation annotations are configured.
    """
    # Training images get horizontal-flip augmentation; validation stays raw.
    augmenting_image_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True,
    )
    plain_image_generator = keras.preprocessing.image.ImageDataGenerator()

    train_generator = CSVGenerator(
        p['annotations'],
        p['classes'],
        augmenting_image_generator,
        batch_size=p['batch-size'],
    )

    validation_generator = None
    if p['val-annotations']:
        validation_generator = CSVGenerator(
            p['val-annotations'],
            p['classes'],
            plain_image_generator,
            batch_size=p['batch-size'],
        )

    return train_generator, validation_generator
# Example #3
    def evaluate_generator(self,
                           annotations,
                           comet_experiment=None,
                           iou_threshold=0.5,
                           max_detections=200):
        """Evaluate the prediction model using a csv fit_generator.

        Args:
            annotations (str): Path to csv label file, rows in the format
                path/to/image.png,x1,y1,x2,y2,class_name
            comet_experiment (object): Optional comet experiment for tracking.
            iou_threshold (float): IoU needed to count a positive detection
                (defaults to 0.5).
            max_detections (int): Maximum number of bounding box predictions.

        Returns:
            float or None: Mean average precision of the evaluated data, or
            None when the csv contains no instances.
        """
        # Derive generator arguments from the annotation file and model config.
        classes_file = utilities.create_classes(annotations)
        arg_list = utilities.format_args(annotations, classes_file, self.config)
        args = parse_args(arg_list)

        # Evaluation generator: no shuffling so results are reproducible.
        generator = CSVGenerator(
            args.annotations,
            args.classes,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
        )

        average_precisions = evaluate(generator,
                                      self.prediction_model,
                                      iou_threshold=iou_threshold,
                                      score_threshold=args.score_threshold,
                                      max_detections=max_detections,
                                      save_path=args.save_path,
                                      comet_experiment=comet_experiment)

        # Report per-class results while accumulating totals.
        total_instances = []
        precisions = []
        for label, (ap, num_annotations) in average_precisions.items():
            print('{:.0f} instances of class'.format(num_annotations),
                  generator.label_to_name(label),
                  'with average precision: {:.4f}'.format(ap))
            total_instances.append(num_annotations)
            precisions.append(ap)

        if not sum(total_instances):
            print('No test instances found.')
            return

        weighted_map = (sum(a * b for a, b in zip(total_instances, precisions))
                        / sum(total_instances))
        print('mAP using the weighted average of precisions among classes: {:.4f}'.format(
            weighted_map))

        # Unweighted mean over classes that actually had instances.
        mAP = sum(precisions) / sum(x > 0 for x in total_instances)
        print('mAP: {:.4f}'.format(mAP))
        return mAP
# Example #4
def create_models_local(p):
    """Build retinanet models plus a test CSVGenerator from param dict *p*.

    Returns:
        tuple: (model, training_model, prediction_model, test_generator).
    """
    plain_image_generator = keras.preprocessing.image.ImageDataGenerator()
    test_generator = CSVGenerator(p['test_csv'], p['classes_csv'],
                                  plain_image_generator, 1)
    # The class count comes from the classes csv parsed by the generator.
    model, training_model, prediction_model = create_models(
        num_classes=test_generator.num_classes(), p=p)
    return model, training_model, prediction_model, test_generator
def create_generators(args, preprocess_image):
    """Create generators for training and validation (csv datasets only).

    Args:
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.

    Returns:
        tuple: (train_generator, validation_generator); validation is None
        when no val annotations are given.

    Raises:
        ValueError: when args.dataset_type is not 'csv'.
    """
    shared_kwargs = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    if args.random_transform:
        # Full geometric + photometric augmentation for training.
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
        visual_effect_generator = random_visual_effect_generator(
            contrast_range=(0.9, 1.1),
            brightness_range=(-.1, .1),
            hue_range=(-0.05, 0.05),
            saturation_range=(0.95, 1.05))
    else:
        # Minimal augmentation: horizontal flips only, no visual effects.
        transform_generator = random_transform_generator(flip_x_chance=0.5)
        visual_effect_generator = None

    if args.dataset_type != 'csv':
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    train_generator = CSVGenerator(
        args.annotations,
        args.classes,
        transform_generator=transform_generator,
        visual_effect_generator=visual_effect_generator,
        **shared_kwargs)

    validation_generator = None
    if args.val_annotations:
        validation_generator = CSVGenerator(args.val_annotations,
                                            args.classes,
                                            shuffle_groups=False,
                                            **shared_kwargs)

    return train_generator, validation_generator
# Example #6
def create_generators(args, group_queue):
    """Create train/validation generators for coco, pascal or csv datasets.

    Args:
        args: parsed command-line arguments carrying dataset configuration.
        group_queue: passed through to the training CSVGenerator.

    Returns:
        tuple: (train_generator, validation_generator); validation is None
        for csv datasets without val annotations.

    Raises:
        ValueError: when args.dataset_type is not coco/pascal/csv.
    """
    # Training augmentation: flips, zoom and rotation; validation gets none.
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.15,
        rotation_range=25)
    val_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(args.coco_path,
                                        'train2017',
                                        train_image_data_generator,
                                        batch_size=args.batch_size)

        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             val_image_data_generator,
                                             batch_size=args.batch_size)
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(args.pascal_path,
                                             'trainval',
                                             train_image_data_generator,
                                             batch_size=args.batch_size)

        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  val_image_data_generator,
                                                  batch_size=args.batch_size)
    elif args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       args.mean_image,
                                       train_image_data_generator,
                                       batch_size=args.batch_size,
                                       image_min_side=int(args.image_min_side),
                                       image_max_side=int(args.image_max_side),
                                       group_queue=group_queue)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                args.mean_image,
                                                val_image_data_generator,
                                                batch_size=args.batch_size)
        else:
            validation_generator = None
    else:
        # BUG FIX: the original referenced the undefined name `dataset_type`,
        # raising NameError instead of the intended ValueError.
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
# Example #7
def create_generators(args):
    """Create train/validation generators for coco, pascal or csv datasets.

    Training images get flips, rotations, shifts and injected noise;
    validation images are left untouched.

    Returns:
        tuple: (train_generator, validation_generator); validation is None
        for csv datasets without val annotations.

    Raises:
        ValueError: when args.dataset_type is not coco/pascal/csv.
    """
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True,
        vertical_flip=True,
        rotation_range=180,
        width_shift_range=5,
        height_shift_range=5,
        preprocessing_function=add_noise  # Kai - add noise to input
    )
    val_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        # Kai - may want to add noise to input as well
    )

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(args.coco_path,
                                        'train2017',
                                        train_image_data_generator,
                                        batch_size=args.batch_size)

        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             val_image_data_generator,
                                             batch_size=args.batch_size)
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(args.pascal_path,
                                             'trainval',
                                             train_image_data_generator,
                                             batch_size=args.batch_size)

        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  val_image_data_generator,
                                                  batch_size=args.batch_size)
    elif args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       train_image_data_generator,
                                       batch_size=args.batch_size)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                val_image_data_generator,
                                                batch_size=args.batch_size)
        else:
            validation_generator = None
    else:
        # BUG FIX: the original referenced the undefined name `dataset_type`,
        # raising NameError instead of the intended ValueError.
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
# Example #8
def create_generators(args):
    """Return (train_generator, validation_generator) for the configured dataset.

    Training images get horizontal-flip augmentation; validation images do not.

    Raises:
        ValueError: for an unknown args.dataset_type.
    """
    flip_augmenter = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True, )
    identity_augmenter = keras.preprocessing.image.ImageDataGenerator()

    dataset = args.dataset_type
    if dataset == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(args.coco_path,
                                        'train2017',
                                        flip_augmenter,
                                        batch_size=args.batch_size)
        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             identity_augmenter,
                                             batch_size=args.batch_size)
    elif dataset == 'pascal':
        train_generator = PascalVocGenerator(args.pascal_path,
                                             'trainval',
                                             flip_augmenter,
                                             batch_size=args.batch_size)
        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  identity_augmenter,
                                                  batch_size=args.batch_size)
    elif dataset == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       flip_augmenter,
                                       batch_size=args.batch_size,
                                       base_dir="./")
        validation_generator = None
        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                identity_augmenter,
                                                batch_size=args.batch_size,
                                                base_dir="./")
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
 def __create_generator(self, annotation_path, class_path):
     """Build a CSVGenerator over the given annotation/class csv files using
     this object's transform generator and shared generator arguments."""
     return CSVGenerator(
         annotation_path,
         class_path,
         transform_generator=self._transform_generator,
         **(self._common_args))
# Example #10
def create_generators(args):
    """Build an augmented training generator and a plain validation generator.

    Returns:
        tuple: (train_generator, validation_generator); validation is None
        when args.val_annotations is empty.
    """
    transform_generator = random_transform_generator(
        min_rotation=-0.1,  # radians
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),  # could be pixels or percentage
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
        flip_x_chance=0.5,
        flip_y_chance=0.5,
    )

    # Sizing/batching arguments shared by both generators.
    size_kwargs = {
        'batch_size': args.batch_size,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
    }

    train_generator = AugmentedGenerator(
        args.annotations,
        args.classes,
        transform_generator=transform_generator,
        **size_kwargs)

    validation_generator = (
        CSVGenerator(args.val_annotations, args.classes, **size_kwargs)
        if args.val_annotations else None)

    return train_generator, validation_generator
# Example #11
def test_csv_generator_anchors():
    """The generator must emit exactly one label row per anchor."""
    anchors_dict = get_anchors_params("tests/test-data/anchors.yaml")
    generator = CSVGenerator("tests/test-data/csv/annotations.csv",
                             "tests/test-data/csv/classes.csv",
                             transform_generator=None,
                             batch_size=1,
                             image_min_side=512,
                             image_max_side=512,
                             **anchors_dict)

    # One batch: inputs are images, targets are (regression, labels) arrays.
    inputs, (regression_batch, labels_batch) = generator.next()
    labels = labels_batch[0]
    image = inputs[0]
    expected_anchors = anchors_for_shape(image.shape, **anchors_dict)
    assert len(labels) == len(expected_anchors)
def main():
    """Dump five generator batches to gen_images/ as jpg files for inspection."""
    args = parse_args(sys.argv[1:])

    # Built but currently not passed to the generator (kept disabled below).
    transform_generator = random_transform_generator(
        min_rotation=-0.1,
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
    )

    generator = CSVGenerator(
        args.annotations,
        args.classes,
        # transform_generator=transform_generator,
        base_dir=args.images_dir)

    os.makedirs('gen_images', exist_ok=True)
    for index in range(5):
        batch = next(generator)
        # Drop the leading batch axis and convert to uint8 pixels.
        pixels = batch[0].reshape(batch[0].shape[1:]).astype('uint8')
        Image.fromarray(pixels).save('gen_images/{}.jpg'.format(index + 1))
# Example #13
def create_generator(args, config):
    """Create the evaluation generator for the configured dataset type.

    Raises:
        ValueError: for an unknown args.dataset_type.
    """
    dataset = args.dataset_type
    if dataset == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        return CocoGenerator(
            args.coco_path,
            'val2017',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)
    if dataset == 'pascal':
        return PascalVocGenerator(
            args.pascal_path,
            'test',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)
    if dataset == 'csv':
        return CSVGenerator(args.annotations,
                            args.classes,
                            image_min_side=args.image_min_side,
                            image_max_side=args.image_max_side)
    if dataset == 'onthefly':
        return onthefly.OnTheFlyGenerator(
            args.annotations,
            batch_size=args.batch_size,
            base_dir=config["rgb_tile_dir"],
            config=config)
    raise ValueError('Invalid data type received: {}'.format(
        args.dataset_type))
# Example #14
def main1():
    """Evaluate one snapshot against the MTSD test split and print the results.

    FIX: the original wrapped the body in ``for i in range(50, 100, 5)`` with
    an unconditional ``break`` at the end, so it only ever ran once; ``i`` and
    the ``map_total`` accumulator were never used. The dead loop and unused
    locals are removed — observable behavior is unchanged.
    """
    # for file in glob.glob("./snapshots/*_05.h5"):
    file = './snapshots/resnet50_csv_05.h5'
    # file = 'C:/Projects/OLD-keras-retinanet-master/snapshots/resnet50_csv_01.h5'

    keras.backend.tensorflow_backend.set_session(get_session())
    model = keras.models.load_model(file, custom_objects=custom_objects)

    val_generator = CSVGenerator(
        csv_data_file='c:/MTSD/Updated/test - copy.csv',
        csv_class_file='c:/MTSD/Updated/classes.csv',
        base_dir='c:/MTSD/Updated/detection/',
        image_min_side=1440,
        image_max_side=2560,
        min_size=25)
    # analyse_images(val_generator)

    my_eval = eval.evaluate(val_generator,
                            model,
                            score_threshold=0.5,
                            iou_threshold=0.5,
                            save_path='C:/video-out/',
                            ground_truth=False)

    print(my_eval)

    # 39 — presumably the number of classes in the MTSD class csv; TODO confirm.
    print(sum(my_eval.values()) / 39)
    keras.backend.clear_session()
def create_generator(args):
    """Create the evaluation generator for the configured dataset type.

    Raises:
        ValueError: for an unknown args.dataset_type.
    """
    dataset = args.dataset_type
    if dataset == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        return CocoGenerator(
            args.coco_path,
            'val2017',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config)
    if dataset == 'pascal':
        return PascalVocGenerator(
            args.pascal_path,
            'test',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config)
    if dataset == 'csv':
        return CSVGenerator(args.annotations,
                            args.classes,
                            image_min_side=args.image_min_side,
                            image_max_side=args.image_max_side,
                            config=args.config)
    raise ValueError('Invalid data type received: {}'.format(
        args.dataset_type))
# Example #16
    def create_generators(self, args, transform_generator=None):
        """Build train/validation CSV generators and return them in a dict.

        The validation entry is None when args.val_annotations is empty.
        """
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       batch_size=args.batch_size)

        validation_generator = None
        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                batch_size=args.batch_size)

        return {
            "train_generator": train_generator,
            "validation_generator": validation_generator,
        }
# Example #17
    def __getitem__(self, index):
        """Fetch batch *index* straight from the parent CSVGenerator.

        No additional augmentation is applied here.
        """
        return CSVGenerator.__getitem__(self, index)
# Example #18
def create_generator():
    """Build a training CSVGenerator with horizontal-flip augmentation.

    Uses the module-level annotation/class paths and batch size.
    """
    flip_augmenter = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True, )
    return CSVGenerator(boring_annotations_path,
                        boring_classes_path,
                        flip_augmenter,
                        batch_size=batch_size)
# Example #19
def create_generator(annotations, classes):
    """Build a non-shuffling CSVGenerator for evaluation."""
    return CSVGenerator(
        annotations,
        classes,
        image_min_side=800,
        image_max_side=1333,
        config=None,
        shuffle_groups=False,
    )
def create_generators(train_annotations, val_annotations, class_mapping, preprocess_image, batch_size,
                      data_augmentation=False, base_dir=None):
    """Build train/validation CSV generators, optionally with random transforms.

    Returns:
        tuple: (train_generator, validation_generator); validation is None
        when val_annotations is falsy.
    """
    if data_augmentation:
        # Full geometric augmentation for training.
        transform_generator = kr_train.random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = kr_train.random_transform_generator(flip_x_chance=0.5)

    # Arguments shared by both generators.
    shared_kwargs = {
        'base_dir': base_dir,
        'preprocess_image': preprocess_image,
        'batch_size': batch_size,
    }

    train_generator = CSVGenerator(
        train_annotations,
        class_mapping,
        transform_generator=transform_generator,
        **shared_kwargs)

    validation_generator = (
        CSVGenerator(val_annotations, class_mapping, **shared_kwargs)
        if val_annotations else None)

    return train_generator, validation_generator
# Example #21
def create_generator(args, preprocess_image):
    """Create the evaluation generator for the configured dataset type.

    Args:
        args: parseargs object with dataset configuration.
        preprocess_image: function that preprocesses an image for the network.

    Raises:
        ValueError: for an unknown args.dataset_type.
    """
    common_args = {
        'preprocess_image': preprocess_image,
    }

    dataset = args.dataset_type
    if dataset == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        return CocoGenerator(
            args.coco_path,
            'val2017',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
            **common_args)
    if dataset == 'pascal':
        return PascalVocGenerator(
            args.pascal_path,
            'test',
            image_extension=args.image_extension,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
            **common_args)
    if dataset == 'csv':
        return CSVGenerator(args.annotations,
                            args.classes,
                            image_min_side=args.image_min_side,
                            image_max_side=args.image_max_side,
                            config=args.config,
                            shuffle_groups=False,
                            **common_args)
    raise ValueError('Invalid data type received: {}'.format(
        args.dataset_type))
# Example #22
def evaluate_model(concepts, model_path, min_examples, download_data=False):
    """Evaluate a retinanet snapshot, printing per-concept F1 scores.

    Args:
        concepts: concept ids to evaluate (passed to get_classmap).
        model_path (str): path to the snapshot loaded with the resnet50 backbone.
        min_examples (int): minimum example count forwarded to download_annotations.
        download_data (bool): when True, wipe and re-download the test examples.

    FIXES: the trailing unterminated ``'''`` (a SyntaxError) is removed, the
    mid-function triple-quoted "comment" string is a real comment, and the
    loop variable no longer shadows the builtin ``dir``.

    NOTE(review): relies on module-level names (test_examples, good_users,
    img_folder, train_annot_file, valid_annot_file, batch_size) — confirm.
    """
    classmap = get_classmap(concepts)

    if download_data:
        folders = [test_examples]
        for folder in folders:
            # Start each output folder from a clean slate.
            if os.path.exists(folder):
                shutil.rmtree(folder)
            os.makedirs(folder)
        download_annotations(min_examples, concepts, classmap, good_users, img_folder, train_annot_file, valid_annot_file, split=0)

    # Initializing model for eval
    model = load_model(model_path, backbone_name='resnet50')
    model = convert_model(model)

    # Persist the class map so CSVGenerator can read it back from disk.
    temp = pd.DataFrame(list(zip(classmap.values(), classmap.keys())))
    temp.to_csv('classmap.csv', index=False, header=False)
    test_generator = CSVGenerator(
        valid_annot_file,
        'classmap.csv',
        shuffle_groups=False,
        batch_size=batch_size
    )

    best_f1, best_thresh = f1_evaluation(test_generator, model, save_path=test_examples)

    total_f1 = 0
    for concept, f1 in best_f1.items():
        print("Concept: " + classmap[concept])
        print("F1 Score: " + str(f1))
        print("Confidence Threshold: " + str(best_thresh[concept]))
        print("")
        total_f1 += f1

    print("Average F1: " + str(total_f1/len(best_f1)))
    print("Find evaluation examples in: " + test_examples)
# Example #23
# Script-level training setup.
# NOTE(review): relies on names defined elsewhere in this file
# (models, annotations_path, classes_path, validations_path) — confirm.
weights = None
imagenet_weights = True
batch_size = 1
image_min_side = 225
image_max_side = 300

backbone_model = models.backbone('resnet50')
freeze_backbone = False
# Arguments shared by the train and validation generators.
common_args = {
    'batch_size': batch_size,
    'image_min_side': image_min_side,
    'image_max_side': image_max_side,
    'preprocess_image': backbone_model.preprocess_image,
}

train_generator = CSVGenerator(annotations_path, classes_path, **common_args)
# An empty validations_path means: train without a validation generator.
if validations_path != '':
    validation_generator = CSVGenerator(validations_path, classes_path,
                                        **common_args)
else:
    validation_generator = None

if snapshot is not None:
    print('Loading model, this may take a second...')
    model = models.load_model(snapshot, backbone_name=backbone)
    training_model = model
    prediction_model = retinanet_bbox(model=model)
else:
    weights = weights
    # default to imagenet if nothing else is specified
    if weights is None and imagenet_weights:
# Example #24
def create_generator():
    """Build the validation CSVGenerator for the helmet test split."""
    return CSVGenerator(
        "./wurenji/helmet/pre/test.csv",
        "./wurenji/helmet/pre/class.csv",
    )
# Example #25
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.

    Returns
        (train_generator, validation_generator); validation_generator is None
        when no validation annotations are configured.

    Raises
        ValueError: if args.dataset_type is neither 'csv' nor 'sail'.
    """
    # Keyword arguments shared by every generator below.
    common_args = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       **common_args)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes, **common_args)
        else:
            validation_generator = None
    #region +mk@sail
    elif args.dataset_type == 'sail':
        # hasattr() is the idiomatic (and cheaper) form of `'steps' in dir(args)`;
        # collecting the optional kwargs once removes the duplicated calls.
        train_extra = {}
        if hasattr(args, 'steps'):
            train_extra['force_steps_per_epoch'] = args.steps
        train_generator = SAIL_Generator(args.annotations,
                                         args.classes,
                                         transform_generator=None,
                                         dataset_type='train',
                                         **train_extra,
                                         **common_args)

        if args.val_annotations:
            val_extra = {}
            if hasattr(args, 'val_steps'):
                val_extra['val_steps'] = args.val_steps
            validation_generator = SAIL_Generator(args.val_annotations,
                                                  args.classes,
                                                  dataset_type='val',
                                                  transform_generator=None,
                                                  **val_extra,
                                                  **common_args)
        else:
            validation_generator = None
    #endregion +mk@sail
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
    # NOTE(review): everything below sits at function-body indent immediately
    # after a `return` at the same level, so it is unreachable as written —
    # it looks like a second example pasted into the same function body.
    # parse arguments
    args = parse_args()

    train_path = os.path.abspath(args.train_path)
    classes = os.path.abspath(args.classes_path)
    val_path = os.path.abspath(args.val_path)

    # create image data generator objects
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True, )

    # create a generator for training data
    train_generator = CSVGenerator(
        csv_data_file=train_path,
        csv_class_file=classes,
        image_data_generator=train_image_data_generator,
        batch_size=args.batch_size,
        image_min_side=args.min_side,
        image_max_side=args.max_side)

    test_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    # create a generator for testing data
    test_generator = CSVGenerator(
        csv_data_file=val_path,
        csv_class_file=classes,
        image_data_generator=test_image_data_generator,
        batch_size=args.batch_size,
        image_min_side=args.min_side,
        image_max_side=args.max_side)
Example #27
0
        default=0.1,
        type=float)

    return parser.parse_args()


# Script entry point: evaluate a saved Keras model against a CSV dataset.
if __name__ == '__main__':
    # parse arguments
    args = parse_args()

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    # NOTE(review): args.gpu is used unguarded here (other examples check
    # `if args.gpu:` first) — confirm parse_args supplies a default.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the model (custom_objects must contain the model's custom layers)
    print('Loading model, this may take a second...')
    model = keras.models.load_model(args.model, custom_objects=custom_objects)

    # create image data generator object
    test_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    # create a generator for testing data
    test_generator = CSVGenerator(args.csv, args.cls_file,
                                  test_image_data_generator)

    evaluate_csv(test_generator, model, args.score_threshold)
Example #28
0
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    # Keyword arguments shared by every generator below.
    common_args = dict(
        batch_size=args.batch_size,
        image_min_side=args.image_min_side,
        image_max_side=args.image_max_side,
        preprocess_image=preprocess_image,
    )

    # Heavier augmentation when requested; otherwise horizontal flips only.
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.05,
            max_rotation=0.05,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            #min_shear=-0.1,
            #max_shear=0.1,
            min_scaling=(0.8, 0.8),
            max_scaling=(1.2, 1.2),
            flip_x_chance=0.5,
            #flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    dataset = args.dataset_type

    if dataset == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        return (
            CocoGenerator(args.coco_path, 'train2017',
                          transform_generator=transform_generator,
                          **common_args),
            CocoGenerator(args.coco_path, 'val2017', **common_args),
        )

    if dataset == 'pascal':
        return (
            PascalVocGenerator(args.pascal_path, 'trainval',
                               transform_generator=transform_generator,
                               **common_args),
            PascalVocGenerator(args.pascal_path, 'test', **common_args),
        )

    if dataset == 'csv':
        train_generator = CSVGenerator(args.annotations, args.classes,
                                       transform_generator=transform_generator,
                                       **common_args)
        validation_generator = None
        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                **common_args)
        return train_generator, validation_generator

    if dataset == 'oid':
        # Options common to both Open Images subsets.
        oid_args = dict(
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
        )
        return (
            OpenImagesGenerator(args.main_dir, subset='train',
                                transform_generator=transform_generator,
                                **oid_args, **common_args),
            OpenImagesGenerator(args.main_dir, subset='validation',
                                **oid_args, **common_args),
        )

    if dataset == 'kitti':
        return (
            KittiGenerator(args.kitti_path, subset='train',
                           transform_generator=transform_generator,
                           **common_args),
            KittiGenerator(args.kitti_path, subset='val', **common_args),
        )

    raise ValueError('Invalid data type received: {}'.format(dataset))
Example #29
0
    # NOTE(review): fragment — the enclosing function's `def` line is not in
    # view; this chunk builds the train/test generators from parsed arguments.
    # parse arguments
    args = parse_args()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create image data generator objects
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True, )

    # create a generator for training data
    train_generator = CSVGenerator(
        csv_data_file=args.train_path,
        csv_class_file=args.classes,
        image_data_generator=train_image_data_generator,
        batch_size=args.batch_size)

    # test generator is optional — only built when a validation path is given
    if args.val_path:
        test_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        )

        # create a generator for testing data
        test_generator = CSVGenerator(
            csv_data_file=args.val_path,
            csv_class_file=args.classes,
            image_data_generator=test_image_data_generator,
            batch_size=args.batch_size)
    else:
        test_generator = None
Example #30
0
    max_rotation=0.1,
    min_translation=(-0.2, -0.2),
    max_translation=(0.2, 0.2),
    min_shear=-0.2,
    max_shear=0.2,
    min_scaling=(0.75, 0.75),
    # NOTE(review): a negative scale factor would mirror the image; -1.5 looks
    # like a typo for 1.5 — confirm against random_transform_generator's docs.
    max_scaling=(-1.5, 1.5),
    flip_x_chance=0.3,
    flip_y_chance=0.3,
)

# Training and validation generators over the DeepFashion CSV annotations.
# Augmentation is currently disabled (transform_generator left commented out).
train_generator = CSVGenerator(
    TRAIN_DATA,
    LABELS,
    base_dir='/input/deepfashion_data',
    batch_size=1,
    image_max_side=img_max_side,
    image_min_side=img_min_side,
    #    transform_generator=transform_generator
)

validation_generator = CSVGenerator(
    VAL_DATA,
    LABELS,
    base_dir='/input/deepfashion_data',
    batch_size=1,
    image_min_side=img_min_side,
    image_max_side=img_max_side
)

# Pretrained backbone weights — presumably ImageNet-trained ResNet-50; the
# exact file/location depends on download_imagenet's implementation.
weights = download_imagenet('resnet50')