Example #1
def run():

    config = InferenceConfig()
    config.display()
    # Create model object in inference mode.
    print('loading model')
    global model
    model = modellib.MaskRCNN(mode="inference",
                              model_dir='/tmp/logs',
                              config=config)
    # Load weights trained on MS-COCO
    print('loading pretrained weights')
    model.load_weights('data/mask_rcnn_coco.h5', by_name=True)
    # Build the Keras predict function up front so the shared model can be
    # called safely from the gRPC worker threads.
    model.keras_model._make_predict_function()

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    bytes2bytes_pb2_grpc.add_Bytes2BytesServicer_to_server(
        Bytes2Bytes(), server)
    server.add_insecure_port('[::]:%i' % args.port)
    server.start()
    print('started image2segmentation service on port %i' % args.port)
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
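The Bytes2Bytes servicer instantiated above is not shown in this snippet; a minimal sketch of what its handler might look like follows. The RPC method name and the request's `data` field are assumptions (the .proto is not part of the example); only the detection call itself follows the Mask R-CNN API.

import numpy as np
import cv2

# Hypothetical servicer sketch: the method name ("Transform") and the bytes
# field ("data") are assumed, since the .proto file is not shown here.
class Bytes2Bytes(bytes2bytes_pb2_grpc.Bytes2BytesServicer):
    def Transform(self, request, context):
        # Decode the raw image bytes and convert BGR -> RGB for Mask R-CNN.
        buf = np.frombuffer(request.data, dtype=np.uint8)
        image = cv2.cvtColor(cv2.imdecode(buf, cv2.IMREAD_COLOR),
                             cv2.COLOR_BGR2RGB)
        # detect() takes a list of images; results[0] holds 'rois', 'masks',
        # 'class_ids' and 'scores' for this frame.
        r = model.detect([image], verbose=0)[0]
        ...  # build and return the reply message defined in the .proto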
Example #2
def LoadModel():
    ROOT_DIR = os.getcwd()
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
    config = TriConfig()
    model = modellib.MaskRCNN(mode="inference", model_dir='./', config=config)
    model.load_weights(COCO_MODEL_PATH, by_name=True)
    return model
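A short usage sketch for the model returned by LoadModel(); the image path is a placeholder, not part of the original.

import skimage.io

# Usage sketch: run inference on a single image with the model built above.
model = LoadModel()
image = skimage.io.imread("images/example.jpg")  # placeholder path, any RGB image

# detect() takes a list of images and returns one dict per image with
# 'rois' (boxes), 'masks', 'class_ids' and 'scores'.
r = model.detect([image], verbose=1)[0]
print(r['class_ids'], r['scores'])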
Example #3
def MaskDetect(videoPath, frameIndex, outfile):

    videoObj = cv2.VideoCapture(videoPath)
    ROOT_DIR = os.getcwd()
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
    if not videoObj.isOpened():
        print('Cannot open video')
        return
    ret, frame = videoObj.read()
    [height, width, _] = frame.shape
    portrait = False
    maxL = max(height, width)
    if height == maxL:
        portrait = True
    minL = min(height, width)
    ratio = maxL / minL
    minL = 64 * round(minL / 2**6)
    maxL = minL * ratio
    maxL = 64 * round(maxL / 2**6)
    if portrait:
        height = maxL
        width = minL
    else:
        height = minL
        width = maxL
    # print(height,width)
    config = TriConfig()
    # config.IMAGE_SHAPE = np.array([maxL, maxL, 3])
    # config.IMAGE_MIN_DIM = minL
    # config.IMAGE_MAX_DIM = maxL
    model = modellib.MaskRCNN(mode="inference",
                              model_dir=MODEL_DIR,
                              config=config)
    # plot_model(model.keras_model, to_file='model.png')
    cell_num = 3
    index = 0
    while videoObj.isOpened():
        ret, frame = videoObj.read()
        if not ret:
            break
        index += 1
        if index < 27000:
            continue
        frame = cv2.resize(frame, (width, height))
        results = model.detect([frame], verbose=1)
        r = copy.deepcopy(results[0])
        print(r)
        featureMap = LocationFeature(results, width, height, ceilNum=cell_num)
        featureMap = featureMap.reshape([-1, cell_num, cell_num])
        print(featureMap)
        visualize.display_instances(frame, r['rois'], r['masks'],
                                    r['class_ids'], class_names, r['scores'])
        # print(results)
        # # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # cv2.imshow('frame', frame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
    del model
    videoObj.release()
Example #4
    def load_model(self, trained_model_path):
        inference_config = InferenceConfig()
        inference_config.DETECTION_MIN_CONFIDENCE = self.min_confidence
        inference_config.IMAGES_PER_GPU = 1
        inference_config.BATCH_SIZE = 1
        # For one GPU, BATCH_SIZE should equal IMAGES_PER_GPU and also equal len(images)

        model = modellib.MaskRCNN(mode="inference",
                                  config=inference_config,
                                  model_dir=trained_model_path)
        model_path = model.find_last()[1]
        model.load_weights(model_path, by_name=True)
        return model
Example #5
    def __init__(self):
        model_path = "Mask_RCNN/mask_rcnn_coco.h5"

        if not os.path.isfile(model_path):
            raise ValueError(500, "Mask-RCNN missing image segmentation model")

        configuration = InferenceConfig()

        # Create model object in inference mode.
        self.model = modellib.MaskRCNN(mode="inference",
                                       model_dir=model_path,
                                       config=configuration)

        # Load weights trained on MS-COCO
        self.model.load_weights(model_path, by_name=True)
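The class above only builds the model; a helper that actually runs inference might look like this (the method name and return shape are assumptions, not part of the original):

    def segment(self, image):
        # Hypothetical helper (not in the original): run Mask R-CNN on one
        # RGB image (numpy array) and return the result dict containing
        # 'rois', 'masks', 'class_ids' and 'scores'.
        return self.model.detect([image], verbose=0)[0]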
Example #6
    def __init__(self):
        ROOT_DIR = os.path.dirname(os.path.realpath(__file__))

        # Directory to save logs and trained model
        MODEL_DIR = os.path.join(ROOT_DIR, "logs")

        # Local path to trained weights file
        COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
        # Download COCO trained weights from Releases if needed
        # if not os.path.exists(COCO_MODEL_PATH):
        #     utils.download_trained_weights(COCO_MODEL_PATH)

        # Directory of images to run detection on
        #IMAGE_DIR = os.path.join(ROOT_DIR, "images")
        config = InferenceConfig()
        config.display()
        # Create model object in inference mode.
        self.model = modellib.MaskRCNN(mode="inference",
                                       model_dir=MODEL_DIR,
                                       config=config)

        # Load weights trained on MS-COCO
        self.model.load_weights(COCO_MODEL_PATH, by_name=True)
        self.class_names = [
            'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
            'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
            'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
            'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
            'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
            'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
            'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
            'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
            'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
            'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
            'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
            'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
            'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
            'scissors', 'teddy bear', 'hair drier', 'toothbrush'
        ]
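As a rough illustration of how class_names is meant to be used: detected class ids index into this list (index 0 is the background class). In the sketch below, `detector` stands for an instance of the class above and `image` for an RGB numpy array loaded elsewhere; both are placeholders.

# Sketch: map detected class ids back to readable labels.
r = detector.model.detect([image], verbose=0)[0]
labels = [detector.class_names[i] for i in r['class_ids']]
print(list(zip(labels, r['scores'])))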
Example #7
    sys.exit(1)

if init_with == "checkpoint" and not os.path.exists(CHECKPOINT_PATH):
    print("No valid trainig checkpoint file found ...")
    sys.exit(1)

# Create directory for saving checkpoints and logs:
if not os.path.exists(train_config.MODEL_DIR):
    os.mkdir(train_config.MODEL_DIR)

# Download COCO trained weights from Releases if needed
if not os.path.exists(train_config.COCO_MODEL_PATH):
    utils.download_trained_weights(train_config.COCO_MODEL_PATH)

model = modellib.MaskRCNN(mode="training",
                          config=bowl_config,
                          model_dir=train_config.MODEL_DIR)

if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(train_config.COCO_MODEL_PATH,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])
elif init_with == "checkpoint":
Example #8
def model_train_evaluate(dataset_train_dir, dataset_eval_dir, coco_model_path,
                         trained_model_path):
    init_with = "coco"  # imagenet, coco, or last
    train_Heads_Only = False
    num_epochs = 5
    images_len = len(dataset_img_ids)
    train_images_len = int(images_len * 0.9)

    indices = list(range(0, images_len))

    np.random.seed(0)
    np.random.shuffle(indices)

    train_im_idxs = indices[0:train_images_len]
    eval_im_idxs = indices[train_images_len:images_len]

    file_eval_idx = open('eval_im_idxs.txt', 'w')

    for im_i in eval_im_idxs:
        file_eval_idx.write("%s\n" % im_i)
    file_eval_idx.close()

    print('Training on', train_images_len, 'images')
    print('Testing on', images_len - train_images_len, 'images')
    #trained_model_path = os.path.join(trained_model_path, "logs")

    # Download COCO trained weights from Releases if needed
    if not os.path.exists(coco_model_path):
        utils.download_trained_weights(coco_model_path)

    config = DataConfig()
    config.display()

    # Training dataset
    dataset_train = udacityDataset()
    dataset_train.add_dataset_dir(dataset_train_dir)
    dataset_train.load_images(train_im_idxs, config.IMAGE_SHAPE[0],
                              config.IMAGE_SHAPE[1])
    dataset_train.prepare()

    # Validation dataset
    dataset_val = udacityDataset()
    dataset_val.add_dataset_dir(dataset_eval_dir)
    dataset_val.load_images(eval_im_idxs, config.IMAGE_SHAPE[0],
                            config.IMAGE_SHAPE[1])
    dataset_val.prepare()

    # Load and display random samples
    image_ids = np.random.choice(dataset_train.image_ids, 2)

    for image_id in image_ids:
        image = dataset_train.load_image(image_id)
        mask, class_ids = dataset_train.load_mask(image_id)
        print(class_ids)
        visualize.display_top_masks(image, mask, class_ids,
                                    dataset_train.class_names)

    # Create model in training mode
    model = modellib.MaskRCNN(mode="training",
                              config=config,
                              model_dir=trained_model_path)

    # Which weights to start with?

    if init_with == "imagenet":
        model.load_weights(model.get_imagenet_weights(), by_name=True)
    elif init_with == "coco":
        # Load weights trained on MS COCO, but skip layers that
        # are different due to the different number of classes
        # See README for instructions to download the COCO weights
        model.load_weights(coco_model_path,
                           by_name=True,
                           exclude=[
                               "mrcnn_class_logits", "mrcnn_bbox_fc",
                               "mrcnn_bbox", "mrcnn_mask"
                           ])
    elif init_with == "last":
        # Load the last model you trained and continue training
        model.load_weights(model.find_last()[1], by_name=True)

    if train_Heads_Only:
        layers_to_train = 'heads'
    else:
        layers_to_train = 'all'

    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=num_epochs,
                layers=layers_to_train)
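After training, a common follow-up (not part of the original snippet) is to reload the newest checkpoint in inference mode and spot-check a validation image. A rough sketch, reusing the names from the function above and assuming the config uses IMAGES_PER_GPU = 1 so detect() accepts a single image:

# Sketch: reload the freshest checkpoint and run inference on one
# validation image; find_last() follows the same API used elsewhere here.
inference_model = modellib.MaskRCNN(mode="inference",
                                    config=config,
                                    model_dir=trained_model_path)
inference_model.load_weights(inference_model.find_last()[1], by_name=True)

image_id = np.random.choice(dataset_val.image_ids)
image = dataset_val.load_image(image_id)
r = inference_model.detect([image], verbose=0)[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            dataset_val.class_names, r['scores'])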
Example #9
import os
import numpy as np
from tqdm import tqdm

import Mask_RCNN.model as modellib
from Mask_RCNN.inference_config import inference_config
from Mask_RCNN.bowl_dataset import BowlDataset
from Mask_RCNN.utils import rle_encode, rle_decode, rle_to_string
import Mask_RCNN.functions as f

ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)

# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()[1]

# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)

dataset_test = BowlDataset()
dataset_test.load_bowl('stage1_test')
dataset_test.prepare()
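The snippet stops after preparing the test set; the imports of tqdm and the RLE helpers suggest the usual next step is to run detection over every test image and encode the predicted masks. A rough sketch (the exact rle_encode signature from Mask_RCNN.utils is an assumption):

# Sketch of the usual follow-up: run inference over the prepared test set.
submission = []
for image_id in tqdm(dataset_test.image_ids):
    image = dataset_test.load_image(image_id)
    r = model.detect([image], verbose=0)[0]
    masks = r['masks']  # boolean array of shape (H, W, num_instances)
    for i in range(masks.shape[-1]):
        # Assumed helper usage: encode one binary mask as a run-length string.
        rle = rle_to_string(rle_encode(masks[..., i]))
        submission.append((dataset_test.image_info[image_id]['id'], rle))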
Example #10

file_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(file_dir)

dataset_coco_dir = project_dir + '/datasets/coco'
dataset_sixd_dir = project_dir + '/datasets/sixd/doumanoglou/train'

COCO_MODEL_PATH = 'Mask_RCNN/mask_rcnn_coco.h5'
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)

dataset_train = SixdDataset()
dataset_train.load_sixd(3000, dataset_coco_dir, 'train', dataset_sixd_dir)
dataset_train.prepare()

dataset_val = SixdDataset()
dataset_val.load_sixd(250, dataset_coco_dir, 'val', dataset_sixd_dir)
dataset_val.prepare()

config = SixdConfig()
model = modellib.MaskRCNN(mode='training', config=config, model_dir='logs')
model.load_weights(COCO_MODEL_PATH, by_name=True,
                   exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                            "mrcnn_bbox", "mrcnn_mask"])

model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=1,
            layers='heads')
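A common follow-up to the heads-only pass above (not shown in the original snippet) is a second stage that fine-tunes all layers at a lower learning rate; a short sketch:

# Optional second stage: fine-tune every layer at a reduced learning rate,
# continuing from the weights trained above.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=2,
            layers='all')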
Example #11
    def build_model(self, mode, config, architecture):
        # Create a Mask R-CNN model in the requested mode.
        return mrnn_modellib.MaskRCNN(mode=mode,
                                      config=config,
                                      architecture=architecture,
                                      model_dir=self.model_root_dir)
Example #12
ROOT_DIR = os.path.join(ROOT_DIR, 'Mask_RCNN')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed

if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)

config = InferenceConfig()
config.display()

# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)

# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
Example #13
    else:

        class InferenceConfig(CocoConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0

        config = InferenceConfig()
    config.display()

    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training",
                                  config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference",
                                  config=config,
                                  model_dir=args.logs)

    # Select weights file to load
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()[1]
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights
        model_path = model.get_imagenet_weights()