Example #1
def captureVideo():
    # We use a K80 GPU with 24GB memory, which can fit 3 images.
    ROOT_DIR = os.getcwd()
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    VIDEO_DIR = os.path.join(ROOT_DIR, "videos")
    VIDEO_SAVE_DIR = os.path.join(VIDEO_DIR, "save")
    WEAPON_MODEL_PATH = "mask_rcnn_weapon_cfg_0009.h5"
    if not os.path.exists(WEAPON_MODEL_PATH):
        utils.download_trained_weights(WEAPON_MODEL_PATH)

    config = PredictionConfig()
    config.display()

    model = modellib.MaskRCNN(mode="inference",
                              model_dir=MODEL_DIR,
                              config=config)
    model.load_weights(WEAPON_MODEL_PATH, by_name=True)
    class_names = ['BG', 'weapon']

    capture = cv2.VideoCapture(os.path.join(VIDEO_DIR, 'trailer.mkv'))
    try:
        if not os.path.exists(VIDEO_SAVE_DIR):
            os.makedirs(VIDEO_SAVE_DIR)
    except OSError:
        print('Error: could not create directory', VIDEO_SAVE_DIR)
    frames = []
    frame_count = 0
    # These two lines can be removed if the source video is not 1080p.
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    while True:
        ret, frame = capture.read()
        # Bail out when the video file ends
        if not ret:
            break

        # Save each frame of the video to a list
        frame_count += 1
        frames.append(frame)
        print('frame_count: {0}'.format(frame_count))
        if len(frames) == config.BATCH_SIZE:
            results = model.detect(frames, verbose=0)
            print('Predicted')
            for i, (frame, r) in enumerate(zip(frames, results)):
                frame = display_instances(frame, r['rois'], r['masks'],
                                          r['class_ids'], class_names,
                                          r['scores'])
                if frame is not None:
                    name = '{0}.jpg'.format(frame_count + i -
                                            config.BATCH_SIZE)
                    name = os.path.join(VIDEO_SAVE_DIR, name)
                    cv2.imwrite(name, frame)
                    print('writing to file:{0}'.format(name))
            # Clear the frames array to start the next batch
            frames = []

    capture.release()
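
The function above only writes the annotated frames to disk as JPEGs. A minimal follow-up sketch, assuming the same VIDEO_SAVE_DIR layout and a 30 fps output (both assumptions, not part of the original snippet), stitches them back into a video with OpenCV's VideoWriter:

import glob
import os

import cv2


def frames_to_video(save_dir, out_path, fps=30.0):
    # Sort numerically so that 2.jpg comes before 10.jpg.
    files = sorted(glob.glob(os.path.join(save_dir, '*.jpg')),
                   key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))
    if not files:
        return
    height, width = cv2.imread(files[0]).shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (width, height))
    for f in files:
        writer.write(cv2.imread(f))
    writer.release()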
Example #2
def SaveModel(pInModelPath, pOutModelDir):
    config = TempConfig()
    # show config
    config.display()

    inferencemdl = model.MaskRCNN(mode="inference",
                                  config=config,
                                  model_dir=os.path.dirname(pInModelPath))
    inferencemdl.load_weights(pInModelPath, by_name=True)
    inference_frozen_graph = freeze_session(
        keras.backend.get_session(),
        output_names=[out.op.name for out in inferencemdl.keras_model.outputs])
    tensorflow.train.write_graph(inference_frozen_graph,
                                 pOutModelDir,
                                 "maskrcnn_inference_model.pb",
                                 as_text=False)
    tensorflow.train.write_graph(inference_frozen_graph,
                                 pOutModelDir,
                                 "maskrcnn_inference_model.pbtxt",
                                 as_text=True)

    print("Inference saved")

    keras.backend.clear_session()

    trainmdl = model.MaskRCNN(mode="training",
                              config=config,
                              model_dir=os.path.dirname(pInModelPath))
    trainmdl.load_weights(pInModelPath, by_name=True)
    train_frozen_graph = freeze_session(
        keras.backend.get_session(),
        output_names=[out.op.name for out in trainmdl.keras_model.outputs])
    tensorflow.train.write_graph(train_frozen_graph,
                                 pOutModelDir,
                                 "maskrcnn_train_model.pb",
                                 as_text=False)
    tensorflow.train.write_graph(train_frozen_graph,
                                 pOutModelDir,
                                 "maskrcnn_train_model.pbtxt",
                                 as_text=True)

    print("Train saved")
Example #3
def get_mrcnn_model():
    """
    初始化MRCNN模型,使用预训练权重
    :return:
    """
    if not os.path.exists(COCO_MODEL_PATH):
        utils.download_trained_weights(COCO_MODEL_PATH)

    class InferenceConfig(coco.CocoConfig):
        GPU_COUNT = 1
        IMAGES_PER_GPU = 10

    config = InferenceConfig()
    config.display()

    # Create model object in inference mode.
    model = modellib.MaskRCNN(mode="inference",
                              model_dir=MODEL_DIR,
                              config=config)

    # Load weights trained on MS-COCO
    model.load_weights(COCO_MODEL_PATH, by_name=True)

    return model
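
A short usage sketch: because InferenceConfig sets GPU_COUNT = 1 and IMAGES_PER_GPU = 10, model.detect() expects exactly 10 images per call. The image directory and reader below are assumptions, not part of the original snippet:

import os

import skimage.io

model = get_mrcnn_model()
image_dir = "images"  # hypothetical directory of input images
files = sorted(os.listdir(image_dir))[:10]
images = [skimage.io.imread(os.path.join(image_dir, f)) for f in files]
results = model.detect(images, verbose=1)
r = results[0]
print(r['rois'], r['class_ids'], r['scores'])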
Example #4
def run(img_dir,
        annos_dir,
        ONLY_TEST=1,
        STEPS_IS_LEN_TRAIN_SET=0,
        n_epochs=5,
        layer_string="5+",
        name="Faster_RCNN-"):
    """ heads: The RPN, classifier and mask heads of the network
        all: All the layers
        3+: Train Resnet stage 3 and up
        4+: Train Resnet stage 4 and up
        5+: Train Resnet stage 5 and up

        img_dir: path to directory containing images
        annos_dir: path to directory containing annotations """

    # torch.backends.cudnn.benchmark = True

    start_time = time.process_time()
    print("start time time(s): ", round(start_time, 2))

    # CONFIGURATION
    import config
    config = config.Config()
    config.NAME = name
    config.display()

    # TEST SET
    test_set = custom_dataset.LampPostDataset()
    test_set.load_dataset(img_dir, annos_dir, is_train=False)
    test_set.prepare()

    if not ONLY_TEST:
        # TRAINING SET
        train_set = custom_dataset.LampPostDataset()
        train_set.load_dataset(img_dir, annos_dir, is_train=True)
        train_set.prepare()

        print("Train: %d, Test: %d images" %
              (len(train_set.image_ids), len(test_set.image_ids)))

        if STEPS_IS_LEN_TRAIN_SET:
            config.STEPS_PER_EPOCH = len(train_set.image_info)

        data_time = time.process_time()
        print("load data time(s): ", round(data_time - start_time, 2),
              "total elapsed: ", round(data_time, 2))

        # LOAD MODEL
        model = modellib.MaskRCNN(config=config, model_dir='./models/')

        load_model_time = time.process_time()
        print("loading model time(s): ", round(load_model_time - data_time, 2),
              "total elapsed: ", round(load_model_time, 2))

        # LOAD WEIGHTS
        model.load_weights('./models/mask_rcnn_coco.pth', callback=True)
        # exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"]

        load_weights_time = time.process_time()
        print("loading weights time(s): ",
              round(load_weights_time - load_model_time, 2), "total elapsed: ",
              round(load_weights_time, 2))

        # Save final config before start training
        config.to_txt(model.log_dir)

        # TRAIN MODEL
        # train heads with higher lr to speedup the learning
        model.train_model(train_set,
                          test_set,
                          learning_rate=2 * config.LEARNING_RATE,
                          epochs=n_epochs,
                          layers=layer_string)

        train_time = time.process_time()
        print("training time(s): ",
              round((train_time - load_weights_time) / 60, 2),
              "total minutes elapsed: ", round(train_time, 2))

    # TEST MODEL
    modellib.device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")
    model = modellib.MaskRCNN(config=config, model_dir='./models/')

    # loading the trained weights of the custom dataset
    # last_model = model.find_last()[1]
    last_model = "./models/resnet50_imagenet.pth"
    print("loading model: ", last_model)
    model.load_weights(last_model)

    # Delete test model log directory
    os.rmdir(model.log_dir)

    image_id = 3
    # 1 = TMX7316010203-001499_pano_0000_001233 - only a hanging lamp post
    # 2 = TMX7316010203-001209_pano_0000_002760 - on the right, behind/above blue car
    # 3 = TMX7316010203-001187_pano_0000_002097 - clearly in the middle (old one) and further down the road on the right
    image, image_meta, gt_class_id, gt_bbox = modellib.load_image_gt(
        test_set, config, image_id)
    info = test_set.image_info[image_id]
    print("image ID: {}.{} ({}) {}".format(info["source"], info["id"],
                                           image_id,
                                           test_set.image_reference(image_id)))

    # Run object detection
    results = model.detect([image])

    # Display results
    r = results[0]
    visualize.display_instances(
        image,
        r['rois'],
        r['class_ids'],  # r['masks'],
        test_set.class_names,
        r['scores'],
        title="Predictions")
Example #5
def main():

    manager = Manager()
    if not settings.REALTIMEinput:  # if not real-time input, read the commands from a file
        fileInput = getInput()

    while True:

        # Real-time command-line input (settings.REALTIMEinput = True).
        # Start the program with: python3 shell.py
        if settings.REALTIMEinput:
            print()
            userInput = input()  # command of the form: cmd <i>

        # File input (settings.REALTIMEinput = False).
        else:
            if fileInput is not None and len(fileInput) != 0:
                userInput = fileInput.popleft()
            else:
                print()
                break

        # Start processing the command.

        words = userInput.split()
        cmd = words[0]
        log("")
        for i in range(len(words)):
            log(words[i])

        #   Error check input
        if (cmd == "cr" or cmd == "de" or cmd == "rq"
                or cmd == "rl") and len(words) != 2:
            log("error: parameter missing")
            print("-1")

        #   Execute command
        else:
            #   Initialize the system.
            if cmd == "in":
                manager = Manager()  #   New manager
                print()
                output = 0

            #   Create process
            elif cmd == "cr":
                output = manager.create(int(words[1]))

            #   Destroy process
            elif cmd == "de":
                output = manager.destroy(int(words[1]))

            #   Request resource
            elif cmd == "rq":
                output = manager.request(int(words[1]))

            #   Release resource
            elif cmd == "rl":
                output = manager.release(int(words[1]))

            #   Timeout
            elif cmd == "to":
                output = manager.timeout()

            else:
                log("Invalid input command")
                print(str(-1) + " ")
                continue  # skip the display below for unrecognized commands

            display(str(output))
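
For reference, a hedged sketch of the file-input path: getInput() presumably returns a deque of command lines like the hypothetical ones below, where "in" resets the manager, "to" takes no argument, and "cr", "de", "rq", "rl" each take one integer argument.

from collections import deque

# Hypothetical command stream; the real getInput() reads it from a file.
fileInput = deque([
    "in",      # initialize a fresh Manager
    "cr 1",    # create a process
    "rq 3",    # request a resource
    "to",      # timeout
    "rl 3",    # release the resource
    "de 1",    # destroy the process
])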
Example #6
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.

    Change the default size attribute to control the size
    of rendered images
    """
    _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return ax


# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR,
                         "logs/tr_heads_nativ_nativ_0.0005||2018-07-16")
# MODEL_DIR = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
config = actionCLSS_Config()
config.display()

dataset_test = ShapesDataset()
dataset_test.load_shapes(10,
                         config.IMAGE_SHAPE[0],
                         config.IMAGE_SHAPE[1],
                         purpose='test')
dataset_test.prepare()


class InferenceConfig(actionCLSS_Config):
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


inference_config = InferenceConfig()
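
A sketch of how these pieces are typically combined for inference; the modellib import, the checkpoint lookup via find_last(), and by-name weight loading are assumptions based on the matterport-style API used elsewhere on this page:

import mrcnn.model as modellib

# Rebuild the model in inference mode with a batch size of 1.
model = modellib.MaskRCNN(mode="inference",
                          model_dir=MODEL_DIR,
                          config=inference_config)

# Assumption: load the most recent checkpoint found under MODEL_DIR.
# (Older library versions return a (dir, path) tuple from find_last().)
model_path = model.find_last()
model.load_weights(model_path, by_name=True)

# Run detection on one test image.
image_id = dataset_test.image_ids[0]
image = dataset_test.load_image(image_id)
r = model.detect([image], verbose=1)[0]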