def main():
    args = sys.argv[1:]

    if os.path.exists(args[0]):
        path_to_weight = args[0]
    else:
        print('path to weights does not exist')
        sys.exit(1)
    if os.path.exists(args[1]):
        path_to_image = args[1]
    else:
        print('path to image does not exist')
        sys.exit(1)
    if 0.0 <= float(args[2]) <= 1.0:
        conf = float(args[2])
    else:
        print('confidence must be a float between 0 and 1')
        sys.exit(1)

    config = TestConfig() 
    config.DETECTION_MIN_CONFIDENCE = conf

    # define the model
    rcnn = MaskRCNN(mode='inference', model_dir='./load_weights', config=config)
    # load coco model weights
    rcnn.load_weights(path_to_weight, by_name=True)
    # load photograph
    img = load_img(path_to_image)
    img = img_to_array(img)
    # make prediction
    results = rcnn.detect([img], verbose=1)
    # get dictionary for first prediction
    r = results[0]
    # show photo with bounding boxes, masks, class labels and scores
    display_instances(img, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
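A minimal sketch of how this main() might be invoked; the script name and file names below are assumptions, only the argument order (weights path, image path, confidence) comes from the parsing above.

if __name__ == '__main__':
    # hypothetical invocation; file names are placeholders:
    #   python detect.py mask_rcnn_coco.h5 photo.jpg 0.7
    main()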
Example #2
def main(images_dir, ann_file, pred_file, net_weights_file):
    
    # test set
    test_set = OurDataset()
    test_set.load_dataset(images_dir, ann_file, is_train=True, val_percentage=0.0)
    test_set.prepare()
    print('Test images: %d' % len(test_set.image_ids))
    
    # create config
    cfg = PredictionConfig()

    # define the model
    model = MaskRCNN(mode='inference', model_dir='./', config=cfg)

    # load model weights
    model.load_weights(net_weights_file, by_name=True)

    # generate the annotations for the test set using the model
    json_output = generateAnnotations(test_set, model, cfg)

    # save the file
    with open(pred_file, 'w') as outfile:
        json.dump(json_output, outfile)

    print('RESULTS FILE GENERATED')
    
    # evaluate the predictions and save them in results.txt
    evaluation(ann_file, pred_file)
    
    print('EVALUATION DONE')
    
    print('PROGRAM FINISHED')
Example #3
def main():
    # load the train and test splits (data is split between train/test/validation)
    train_set = prep_dataset(os.path.join(DATA_PATH, 'train'))
    test_set = prep_dataset(os.path.join(DATA_PATH, 'test'))

    # generate model
    config = LicensePlateConfig()
    model = MaskRCNN(mode='training',
                     model_dir=os.path.join(WEIGHT_PATH, 'log/'),
                     config=config)

    # load pre-trained MS COCO weights
    model.load_weights(os.path.join(WEIGHT_PATH, 'mask_rcnn_coco.h5'),
                       by_name=True,
                       exclude=[
                           'mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox',
                           'mrcnn_mask'
                       ])

    # train top layer
    model.train(train_set,
                test_set,
                learning_rate=config.LEARNING_RATE,
                epochs=10,
                layers='heads')

    # adjust learning rate for finetuning to avoid overfitting
    config.LEARNING_RATE = 1e-5

    # finetune all layers; epochs is cumulative in this Mask R-CNN implementation,
    # so continue to epoch 15 to run 5 more epochs on top of the 10 above
    model.train(train_set,
                test_set,
                learning_rate=config.LEARNING_RATE,
                epochs=15,
                layers='all')
Example #4
    def predictImage(self, imagePath):
        testConfig = TestConfig()
        testConfig.NUM_CLASSES = 1 + len(self.classes)
        testConfig.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + testConfig.NUM_CLASSES
        xmlPath = imagePath[0:imagePath.rfind(".")] + ".xml"
        rcnn = MaskRCNN(mode='inference', model_dir='./', config=testConfig)
        # load model weights
        # J. update this with the path to the model.
        rcnn.load_weights(self.modelWeights, by_name=True)

        # load the input image and convert it to an array
        img = load_img(imagePath)
        img = img_to_array(img)
        (hI, wI, d) = img.shape
        # detect objects in the input image and correct for the image scale
        # Set short=512

        results = rcnn.detect([img], verbose=1)
        r = results[0]
        boxes1 = []
        for (box, score, cid) in zip(r['rois'], r['scores'], r['class_ids']):
            if score < self.CONFIDENCE:
                continue
            # Add the label, taken from self.classes[cid - 1]
            boxes1.append(([self.classes[cid - 1], box], score))

        with open(xmlPath, "w") as f:
            f.write(
                self.generateXML(
                    imagePath.split("/")[-1], imagePath[0:imagePath.rfind("/")],
                    wI, hI, d, boxes1))
        self.combineImageAndPrediction(imagePath, xmlPath)
Example #5
class ObjectDetectorMaskRCNN(ObjectDetector):
    def _init_dir(self, path):
        # Root directory of the project
        self._root_dir = Path(path)
        # Directory to save logs and trained model
        self._model_dir = os.path.join(self._root_dir, "logs")
        # Local path to trained weights file
        self._coco_model_path = os.path.join(self._root_dir, "mask_rcnn_coco.h5")
        # Download COCO trained weights from Releases if needed
        if not os.path.exists(self._coco_model_path):
            mrcnn.utils.download_trained_weights(self._coco_model_path)
        # Directory of images to run detection on
        # self._image_dir = os.path.join(self._root_dir, "images")

    def _load_model(self):
        # Create a Mask-RCNN model in inference mode
        self._model = MaskRCNN(mode="inference", model_dir=self._model_dir, config=MaskRCNNConfig())
        # Load pre-trained model
        self._model.load_weights(self._coco_model_path, by_name=True)

    def __init__(self, path='.'):
        self._init_dir(path=path)
        self._load_model()
    
    def detect(self, img):
        # img: RGB
        # cv2 default: BGR
        r = self._model.detect([img], verbose=1)[0]
        # The r variable will now have the results of detection:
        # - r['rois'] are the bounding box of each detected object
        # - r['class_ids'] are the class id (type) of each detected object
        # - r['scores'] are the confidence scores for each detection
        # - r['masks'] are the object masks for each detected object (which gives you the object outline)
        return r
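A minimal usage sketch of the class above; 'frame.jpg' is a placeholder and the use of OpenCV is an assumption. Since cv2 reads BGR and detect() expects RGB (per the comments above), the channels are converted first.

import cv2

detector = ObjectDetectorMaskRCNN(path='.')
bgr = cv2.imread('frame.jpg')                   # OpenCV reads images as BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)      # detect() expects RGB
r = detector.detect(rgb)
print(r['rois'], r['class_ids'], r['scores'])   # boxes, class ids, confidences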
Example #6
def prediction_masks(images, model_option):
    if model_option=='random':
        predictions = np.zeros((*images.shape[0:3],5))
        for i, image in enumerate(images):
            predictions[i] = random_predictions(image_size=(350,525))
    elif model_option=='fcn':
        #Local path
        model_path = 'image_segmentation.h5'
        #Colab path
        #model_path = 'drive/My Drive/CS 583/Project/image_segmentation.h5'
        model = load_model(model_path)
        predictions = model.predict(images)
    elif model_option=='mask-rcnn':
        #Recreate the model in inference mode
        inference_config = InferenceConfig()
        model = MaskRCNN(mode='inference', config=inference_config, model_dir='./')
        #Local path
        model_path = 'mask_rcnn_clouds_config_0001.h5'
        #Colab path
        #model_path = 'drive/My Drive/CS 583/Project/mask_rcnn_clouds_config_0001.h5'
        #Load trained weights
        model.load_weights(model_path, by_name=True)
        samples = np.zeros((images.shape[0], 1024, 1024, images.shape[-1]))
        # `names` (the test image file names) is assumed to be defined at module level
        for i, name in enumerate(names):
            image = Image.open('data/test_images/' + name)
            image = image.resize((1024, 682))
            new_im = Image.new("RGB", (1024, 1024))
            new_im.paste(image, (0, 170))  # paste() modifies new_im in place and returns None
            samples[i] = np.array(new_im)
        predictions = model.detect(samples, verbose=0)
        for i, pred in enumerate(predictions):
            pred_mask = pred['masks']
            predictions[i] = pred_mask[170:854, :, :]  # trim off top and bottom padding
    else:
        raise ValueError('No such model option: {}'.format(model_option))
    return predictions
Example #7
    def predict(self, images, verbose=False):
        '''Run inference on a list of images and return one result dict per image.'''

        if not self.test_model:

            model = MaskRCNN(mode="inference", 
                              config=C.TestingConfig(),
                              model_dir=self.model_dir)

            weights = model.find_last()

            model.load_weights(weights, by_name=True)

            self.test_model = model

        results = []
        for image in images:
            results.append(self.test_model.detect([image])[0])

        if verbose:
            r = results[0]
            visualize.display_instances(images[0], r['rois'], r['masks'], r['class_ids'], 
                                        ["",""], r['scores'],figsize=(10,10))


        return results
def train(config=SeptinConfig()):
    """Train the model."""
    # Training dataset.
    dataset_train = SeptinDataset()
    dataset_train.load_Septin("train")
    dataset_train.prepare()
    print('Train: %d' % len(dataset_train.image_ids))

    # Validation dataset
    dataset_val = SeptinDataset()
    dataset_val.load_Septin("test")
    dataset_val.prepare()
    print('Test: %d' % len(dataset_val.image_ids))

    config.display()

    # define the model
    model = MaskRCNN(mode='training', model_dir='./', config=config)
    # load weights (mscoco) and exclude the output layers
    model.load_weights('mask_rcnn_coco.h5',
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])
    # train weights (output layers or 'heads')
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=8,
                layers='heads')
Example #9
def define_model(model=None, class_names=None):
    K.clear_session()

    if model is None or not Path(model).exists():
        model = './djangoserver/mask_rcnn_coco.h5'
        # define the 81 classes that the coco model knows about
        class_names = [
            'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
            'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
            'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
            'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
            'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
            'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
            'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
            'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
            'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
            'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
            'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
            'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
            'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
            'scissors', 'teddy bear', 'hair drier', 'toothbrush'
        ]
    rcnn = MaskRCNN(mode='inference',
                    config=TestConfig(class_names),
                    model_dir='./djangoserver/')
    rcnn.load_weights(model, by_name=True)

    return rcnn, class_names
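A minimal usage sketch of define_model(); 'street.jpg' is a placeholder, and it assumes the fallback COCO weights are present at './djangoserver/mask_rcnn_coco.h5'.

from keras.preprocessing.image import load_img, img_to_array
from mrcnn.visualize import display_instances

rcnn, class_names = define_model()          # falls back to the COCO weights and class names
img = img_to_array(load_img('street.jpg'))  # placeholder image path
r = rcnn.detect([img], verbose=0)[0]
display_instances(img, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])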
Example #10
def get_model():
    # Create a Mask-RCNN model in inference mode
    model = MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=MaskRCNNConfig())

    # Load pre-trained model
    model.load_weights(COCO_MODEL_PATH, by_name=True)

    return model 
Example #11
def load():
    global model
    model = MaskRCNN(mode='inference',
                     config=ConfigInference(),
                     model_dir='/opt/beslim.ai/var/run/model/mrcnn')

    model.load_weights('/opt/beslim.ai/etc/mask_rcnn_config_train_0022.h5',
                       by_name=True)
Example #12
def load_model():
    #Load Prediction config
    cfg = PredictionConfig()
    # define the model
    model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
    # load model weights
    model_path = 'model_weights.h5'
    model.load_weights(model_path, by_name=True)
    return model, cfg
def init_model_config():
    print("[x] to close")
    config = PredictionConfig()
    sourcePath = os.path.dirname(os.path.abspath(__file__))
    model = MaskRCNN(mode='inference', model_dir=sourcePath, config=config)
    modelAbsolutePath = os.path.join(sourcePath, "model.h5")
    print(modelAbsolutePath)
    model.load_weights(modelAbsolutePath, by_name=True)
    return model, config
def before_first_request():
    global model, graph, session

    graph = tf.Graph()
    session = tf.Session(graph=graph)
    weight_location = os.path.join('weights', 'weights.h5')
    with graph.as_default():
        with session.as_default():
            model = MaskRCNN(mode='inference', config=InferenceConfig(), model_dir=str())
            model.load_weights(weight_location, by_name=True)
def predict():
    config = PredictionConfig()
    weights_path = ('../web app implemenation/logs/parking_cfg20200617T0142/'
                    'mask_rcnn_parking_cfg_0005.h5')
    model_predict = MaskRCNN(mode="inference", model_dir=weights_path, config=config)
    model_predict.load_weights(weights_path, by_name=True)
    return model_predict
Example #16
def run(train_csv, imagedir, model_path):
    train_data = utils.load(train_csv)
    classes = utils.determine_classes(train_data)

    train_set = prepare_dataset(train_data, imagedir, classes)

    cfg = PredictionConfig()
    model = MaskRCNN(mode="inference", model_dir=imagedir, config=cfg)
    model.load_weights(model_path, by_name=True)
    train_mAP = evaluate_model(train_set, model, cfg, classes, model_path)
    print("Train mAP: %.3f" % train_mAP)
Example #17
def predict():
    import numpy as np
    from mrcnn.model import MaskRCNN
    from mrcnn.model import mold_image
    import skimage.io

    model_config = config.PredictionConfig()
    model = MaskRCNN(mode='inference', model_dir='./', config=model_config)
    model.keras_model.metrics_tensors = []
    # load model weights
    model.load_weights(config.keras_model_dir, by_name=True)

    dir_path = '../data/test'
    outfile = open('../data/submission.csv', 'w')
    for id in range(1, 1515):
        # reading these files raises an error; no fix is known yet, so write fixed counts
        if id == 50:
            outfile.write('{},{},{}\n'.format(id, 5, 0))
            continue
        elif id == 227:
            outfile.write('{},{},{}\n'.format(id, 1, 1))
            continue
        elif id == 1201:
            outfile.write('{},{},{}\n'.format(id, 2, 0))
            continue
        try:
            file_path = '{}/{}.jpg'.format(dir_path, id)
            image = skimage.io.imread(file_path)
            scaled_image = mold_image(image, model_config)
            sample = np.expand_dims(scaled_image, 0)
            yhat = model.detect(sample, verbose=0)[0]
        except Exception:
            print(file_path)
            continue
        # sort detections by score, descending
        indices = np.argsort(yhat["scores"])[::-1]
        boxes = []
        for i in range(len(indices)):
            boxes.append([
                yhat["class_ids"][i] - 1, yhat['rois'][i][1],
                yhat['rois'][i][0], yhat['rois'][i][3], yhat['rois'][i][2]
            ])
        boxes = np.array(boxes)
        boxes = boxes[indices]
        hat = 0
        person = 0
        for box in boxes:
            label = box[0]
            if label == 0:
                hat += 1
            else:
                person += 1
        outfile.write('{},{},{}\n'.format(id, hat, person))
    outfile.close()
Example #18
def run(train_csv, test_csv, imagedir, model_path):
    data = utils.load(train_csv)
    classes = utils.determine_classes(data)

    test_data = utils.load(test_csv, is_train=False)
    test_set = prepare_dataset(test_data, imagedir, classes)

    cfg = PredictionConfig()
    model = MaskRCNN(mode="inference", model_dir=imagedir, config=cfg)
    model.load_weights(model_path, by_name=True)
    evaluate_model(test_set, model, cfg, classes, model_path)
    print("Done!")
Example #19
def run(csv, model_dir, model_file, img_name):
    data = utils.load(csv)
    classes = utils.determine_classes(data)
    train_set = prepare_dataset(data, model_dir, classes)

    cfg = PredictionConfig()
    model = MaskRCNN(mode="inference", model_dir=model_dir, config=cfg)
    model.load_weights(model_file, by_name=True)

    idx = data[data["image"] == img_name].index[0]
    plot(train_set, model, cfg, classes, 0, idx)
    pyplot.show()
def predictImage(train_set, test_set, modelPath):
    # create config
    cfg = PredictionConfig()
    # define the model
    model = MaskRCNN(mode='inference', model_dir='./model', config=cfg)
    # load model weights
    model_path = modelPath
    model.load_weights(model_path, by_name=True)
    # plot predictions for train dataset
    plot_actual_vs_predicted(train_set, model, cfg)
    # plot predictions for test dataset
    plot_actual_vs_predicted(test_set, model, cfg)
Example #21
def main():
    # define the model
    rcnn = MaskRCNN(mode='inference', model_dir='./', config=TestConfig())
    # load coco model weights
    rcnn.load_weights(model, by_name=True)
    # load photograph
    img = load_img(image)
    img = img_to_array(img)
    # make prediction
    results = rcnn.detect([img], verbose=0)
    # visualize the results
    draw_image_with_boxes(image, results[0]['rois'])
Example #22
def detectCars(image):
    class TestConfig(Config):
        NAME = "test"
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        NUM_CLASSES = 1 + 80

    rcnn = MaskRCNN(mode='inference', model_dir='./', config=TestConfig())
    rcnn.load_weights('mask_rcnn_coco.h5', by_name=True)
    results = rcnn.detect([image], verbose=0)
    r = results[0]
    return get_car_boxes(r['rois'], r['class_ids']).tolist()
Example #23
def load_model():

    # model configuration object
    cfg = PredictionConfig()
    global model
    # build the model and load its weights
    model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
    model.load_weights('../Mask_RCNN/mask_rcnn_only_crossings_0098.h5',
                       by_name=True)
    # Build the Keras predict function up front so the model can be called
    # safely from the web server's request threads
    model.keras_model._make_predict_function()
    return model
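A hedged sketch of how the model loaded above might be served; the Flask route, field name, and use of Pillow are assumptions, not part of the original code.

import numpy as np
from PIL import Image
from flask import Flask, request, jsonify

app = Flask(__name__)
load_model()  # populates the global `model` once at startup

@app.route('/detect', methods=['POST'])
def detect_route():
    # hypothetical endpoint: expects an uploaded file under the 'image' field
    img = np.array(Image.open(request.files['image']).convert('RGB'))
    r = model.detect([img], verbose=0)[0]
    return jsonify(rois=r['rois'].tolist(), scores=r['scores'].tolist())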
def run(csv, model_dir, model_file, img_name):
    data = utils.load(csv)
    classes = utils.determine_classes(data)
    train_set = prepare_dataset(data, model_dir, classes)

    cfg = PredictionConfig()
    model = MaskRCNN(mode="inference", model_dir=model_dir, config=cfg)
    model.load_weights(model_file, by_name=True)

    image_path = f"{model_dir}/{img_name}"
    plot(train_set, model, cfg, image_path, classes)
    pyplot.show()
def predict(train_set, test_set, modelPath):
    # create config
    cfg = PredictionConfig()
    # define the model
    model = MaskRCNN(mode='inference', model_dir='./model', config=cfg)
    # load model weights
    model.load_weights(modelPath, by_name=True)
    # evaluate model on training dataset
    train_mAP = evaluate_model(train_set, model, cfg)
    print("Train mAP: %.3f" % train_mAP)
    # evaluate model on test dataset
    test_mAP = evaluate_model(test_set, model, cfg)
    print("Test mAP: %.3f" % test_mAP)
Example #26
def before_first_request():
    global model, graph, session

    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default():
        with session.as_default():
            model = MaskRCNN(mode='inference',
                             config=InferenceConfig(),
                             model_dir='')

            weight_location = os.environ.get('WEIGHTS',
                                             'weights/model_weights.h5')
            model.load_weights(weight_location, by_name=True)
Example #27
def main():
    config = PredictionConfig()

    model = MaskRCNN(mode='inference', model_dir='./', config=config)
    # my model
    model.load_weights(TRAINED_MODEL_PATH, by_name=True)

    test_set = ObjectDataset()
    test_set.load_dataset('test')
    test_set.prepare()

    print('Test: %d' % len(test_set.image_ids))
    for i in range(10, 25, 4):
        plot_actual_vs_predicted(test_set, model, config, i)
    def getAnalyzed(self):
        ##        print("Analyzing...")
        self.lblResult.setText("Analyzing...")

        rcnn = MaskRCNN(mode='inference',
                        model_dir='./',
                        config=InferenceConfig())
        rcnn.load_weights(self.weigths, by_name=True)

        image = cv2.imread(str(self.image_file))
        results = rcnn.detect([image], verbose=0)
        r = results[0]

        masked_image = np.array(image.copy())
        boxes = r['rois']
        masks = r['masks']
        color = (0, 255, 255)
        alpha = 0.75
        for i in range(boxes.shape[0]):
            y1, x1, y2, x2 = boxes[i]
            mask = masks[:, :, i]
            for c in range(3):
                masked_image[:, :, c] = np.where(
                    mask == 1, ((masked_image[:, :, c] / 255) * color[c]),
                    masked_image[:, :, c])

            padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                                   dtype=np.uint8)
            padded_mask[1:-1, 1:-1] = mask
            contours = find_contours(padded_mask, 0.5)
            for verts in contours:
                verts = np.array(np.fliplr(verts) - 1, np.int32)
                verts = verts.reshape((-1, 1, 2))
                cv2.polylines(masked_image, [verts],
                              True, (0, 255, 255),
                              thickness=1)

            cv2.rectangle(masked_image, (x1, y1), (x2, y2), color, 2)

        date_and_time = str(
            datetime.datetime.now().strftime("%I_%M_%p_%B_%d_%Y_")) + ".jpg"
        cv2.imwrite(str(date_and_time), masked_image)

        pixmap = QtGui.QPixmap(
            date_and_time)  #Setup pixmap with the provided image
        pixmap = pixmap.scaled(self.lblResult.width(), self.lblResult.height(),
                               QtCore.Qt.KeepAspectRatio)  #Scale pixmap
        self.lblResult.setPixmap(pixmap)  #Set the pixmap into the label
        self.lblResult.setAlignment(
            QtCore.Qt.AlignCenter)  #Align the label to center
def main(img_id, img_path):
    # holdout validation dataset
    validation_set = prep_dataset(os.path.join(DATA_PATH, 'validation'))

    # generate model
    config = PredictionConfig()
    model = MaskRCNN(mode='inference',
                     model_dir=os.path.join(WEIGHT_PATH, 'log/'),
                     config=config)

    # load pre-trained weights for LP dataset
    model.load_weights(os.path.join(WEIGHT_PATH, 'mask_rcnn_lp.h5'),
                       by_name=True)

    # no image specified: evaluate using PASCAL VOC mAP on the holdout validation set
    if img_id is None and img_path is None:
        val_mAP = evaluate(validation_set, model, config)
        print('mAP on validation set: {:.4f}'.format(val_mAP))
        return
    # demo on validation image
    elif img_id is not None:
        img_id = int(img_id)

        # load image and corresponding mask
        img = validation_set.load_image(img_id)
        mask = validation_set.load_mask(img_id)[0]

        # object detection
        pred = detect(model, img, config)

        # generate images
        fig, axes = plt.subplots(1, 2, figsize=(10, 5))
        add_mask(axes[0], img, mask)
        add_detected_img(axes[1], img, pred)
    else:
        img_path = os.path.abspath(img_path)

        # read and preprocess image
        img = cv2.imread(img_path)

        # object detection
        pred = detect(model, img, config)

        # generate image
        fig, ax = plt.subplots(figsize=(10, 5))
        add_detected_img(ax, img, pred)

    plt.show()
    def __init__(self, model_folder, model_name):

        self.cfg = PredictionConfig()
        # load model
        model = MaskRCNN(mode='inference',
                         model_dir=model_folder,
                         config=self.cfg)
        model_full_path = os.path.join(model_folder, model_name)
        model.load_weights(model_full_path, by_name=True)
        self.model = model
        self.class_category_list = {
            1: 'router',
            2: 'ceiling fan',
            3: 'tv',
            4: 'laptop'
        }