Example #1
def _main_(args):
    """
    :param args: command line arguments
    """

    # parse command line argument
    config_path = args.conf
#    gpu_num = args.gpu

#    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num  # specify which GPU(s) to be used

    # open and load the config json
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # parse the json to retrieve the training configuration
    backend = config["model"]["backend"]
    input_size = (config["model"]["im_width"], config["model"]["im_height"])
    classes = config["model"]["classes"]
    data_dir = config["train"]["data_directory"]

    # Trigger the dataset downloader if the dataset is not present
    #DataSanity(data_dir).dispatch()

    # define the model and train
    segment = Segment(backend, input_size, classes)
    segment.train(config["train"], config["valid"], config["model"])
Example #2
def _main_(args):
    """
    :param args: command line arguments
    """

    # parse command line argument
    config_path = args.conf

    # open and load the config json
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # parse the json to retrieve the training configuration
    backend = config["model"]["backend"]
    input_size = (config["model"]["im_width"], config["model"]["im_height"])
    classes = config["model"]["classes"]

    # define the model and train
    segment = Segment(backend, input_size, classes)
    segment.train(config["train"])
Example #3
def _main_(args):
    """
    :param args: command line arguments
    """

    # parse command line argument
    config_path = args.conf

    # open and load the config json
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # parse the json to retrieve the training configuration
    backend = config["model"]["backend"]
    input_size = (config["model"]["im_width"], config["model"]["im_height"])
    classes = config["model"]["classes"]
    data_dir = config["train"]["data_directory"]

    # Trigger the dataset downloader if the dataset is not present
    DataSanity(data_dir).dispatch()

    # define the model and train
    segment = Segment(backend, input_size, classes)
    segment.train(config["train"])
Example #4
def _main_(args):
    """
    :param args: command line arguments
    """

    # Parse command line argument
    config_path = args.conf

    # Open and load the config json
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # parse the json to retrieve the training configuration
    backend = config["model"]["backend"]
    input_size = (config["model"]["im_width"], config["model"]["im_height"])
    classes = config["model"]["classes"]

    # define the model and train
    segment = Segment(backend, input_size, classes)

    model = segment.feature_extractor

    # Load best model
    model.load_weights(config['test']['model_file'])

    # Data sequence for testing
    test_gen = DataSequence(config["test"]["test_images"],
                            config["test"]["test_annotations"],
                            config["test"]["test_batch_size"],
                            config["model"]["classes"],
                            config["model"]['im_height'],
                            config["model"]['im_width'],
                            config["model"]['out_height'],
                            config["model"]['out_width'],
                            do_augment=False)

    iou = sm.metrics.IOUScore(threshold=0.5)
    fscore = sm.metrics.FScore(threshold=0.5)
    metrics = [iou, fscore]
    model.compile(optimizer=Adam(0.1),
                  loss="binary_crossentropy",
                  metrics=metrics)

    # Evaluate once over the test sequence
    scores = model.evaluate_generator(test_gen)
    print("Loss: {:.5}".format(scores[0]))
    for metric, value in zip(metrics, scores[1:]):
        print("mean {}: {:.5}".format(metric.__name__, value))
Example #5
def _main_(args):
    """
    :param args: command line arguments
    """

    # parse command line argument
    config_path = args.conf

    # open and load the config json
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # Load best model
    # model = tf.keras.models.load_model(args.model)

    # parse the json to retrieve the training configuration
    backend = config["model"]["backend"]
    input_size = (config["model"]["im_width"], config["model"]["im_height"])
    classes = config["model"]["classes"]

    # define the model and train
    segment = Segment(backend, input_size, classes)

    model = segment.feature_extractor

    # Load best model
    model.load_weights(config['test']['model_file'])

    # Create a VideoCapture object and read from input file
    # If the input is the camera, pass 0 instead of the video file name
    cap = cv2.VideoCapture(args.video)

    # Init out video writer
    if args.out_video is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out_vid = cv2.VideoWriter(
            args.out_video, fourcc, cap.get(cv2.CAP_PROP_FPS),
            (config["model"]["out_width"], config["model"]["out_height"]))

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")
        return

    count = 0
    # Read until video is completed
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        raw = cv2.resize(frame, (input_size[0], input_size[1]))

        # Sub mean
        # Because we use it with the training samples, I put it here
        # See in ./src/data/data_utils/data_loader
        img = raw.astype(np.float32)
        img[:, :, 0] -= 103.939
        img[:, :, 1] -= 116.779
        img[:, :, 2] -= 123.68
        img = img[:, :, ::-1]

        net_input = np.expand_dims(img, axis=0)
        preds = model.predict(net_input, verbose=1)
        pred_1 = preds[:, :, :, 1].reshape((input_size[1], input_size[0]))
        pred_2 = preds[:, :, :, 2].reshape((input_size[1], input_size[0]))
        pred_3 = preds[:, :, :, 3].reshape((input_size[1], input_size[0]))

        # Create uint8 masks
        road_mask = np.zeros((input_size[1], input_size[0]), np.uint8)
        car_mask = np.zeros((input_size[1], input_size[0]), np.uint8)
        pedestrian_mask = np.zeros((input_size[1], input_size[0]), np.uint8)
        road_mask[pred_1 > 0.5] = 255
        car_mask[pred_2 > 0.5] = 255
        pedestrian_mask[pred_3 > 0.5] = 255

        # Bind mask with img
        out_img = raw.copy()
        out_img = mask_with_color(out_img, road_mask, color=(0, 0, 255))
        out_img = mask_with_color(out_img, car_mask, color=(0, 255, 0))
        out_img = mask_with_color(out_img, pedestrian_mask, color=(255, 0, 0))

        # Write output
        if args.out_video is not None:
            # Write the frame into the output
            out_vid.write(out_img)

        if args.out_images is not None:
            cv2.imwrite(os.path.join(args.out_images,
                                     str(count) + ".png"), out_img)

        count += 1
        cv2.imshow("out_img", out_img)
        cv2.waitKey(1)

    cap.release()
    if args.out_video is not None:
        out_vid.release()
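Examples #5 and #6 call a mask_with_color helper that is defined elsewhere in the repository. Its actual implementation is not shown here; the sketch below is one plausible version, assuming it blends a solid colour into the frame wherever the 8-bit mask is non-zero.

import cv2
import numpy as np

def mask_with_color(img, mask, color=(0, 0, 255), alpha=0.5):
    """Hypothetical overlay: blend `color` into `img` where `mask` > 0."""
    overlay = img.copy()
    overlay[mask > 0] = color  # paint the masked pixels with the solid colour
    return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)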
Example #6
def _main_(args):
    """

    :param args: command line arguments
    """

    # parse command line arguments
    config_path = args.conf

    # open json file and load the configurations
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    # parse the json to retrieve the training configurations
    backend = config["model"]["backend"]
    input_size = (config["model"]["im_width"], config["model"]["im_height"])
    classes = config["model"]["classes"]
    data_dir = config["train"]["data_directory"] + '/'

    # Trigger the dataset downloader if the dataset is not present
    DataSanity(data_dir).dispatch()

    # define the model and train
    segment = Segment(backend, input_size, classes)
    # segment.train(config["train"])

    #----------------------------Testing----------------------------------#
    model = segment.feature_extractor

    # Load best model
    model.load_weights(config["train"]["save_model_name"])

    inps = glob.glob(os.path.join(args.test_input_folder, "*.jpg")) + \
           glob.glob(os.path.join(args.test_input_folder, "*.png")) + \
           glob.glob(os.path.join(args.test_input_folder, "*.jpeg"))

    assert isinstance(inps, list)

    count = 0

    for inp in inps:
        raw = cv2.imread(inp)
        raw = cv2.resize(raw, (input_size[0], input_size[1]))  # cv2.resize expects (width, height)

        # Sub mean
        img = raw.astype(np.float32)
        img[:, :, 0] -= 103.939
        img[:, :, 1] -= 116.779
        img[:, :, 2] -= 123.68
        img = img[:, :, ::-1]

        net_input = np.expand_dims(img, axis=0)
        preds = model.predict(net_input, verbose=1)
        pred_1 = preds[:, :, :, 1].reshape((input_size[1], input_size[0]))
        # pred_2 = preds[:, :, :, 2].reshape((input_size[1], input_size[0]))
        # pred_3 = preds[:, :, :, 3].reshape((input_size[1], input_size[0]))

        # Create uint8 masks
        road_mask = np.zeros((input_size[1], input_size[0]), np.uint8)
        # car_mask = np.zeros((input_size[1], input_size[0]), np.uint8)
        # pedestrian_mask = np.zeros((input_size[1], input_size[0]), np.uint8)
        road_mask[pred_1 > 0.5] = 255
        # car_mask[pred_2 > 0.5] = 255
        # pedestrian_mask[pred_3 > 0.5] = 255

        # Bind mask with img
        out_img = raw.copy()
        out_img = cv2.resize(out_img, (input_size[0], input_size[1]))
        out_img = mask_with_color(out_img, road_mask, color=(0, 255, 0))
        # out_img = mask_with_color(out_img, car_mask, color=(0, 255, 0))
        # out_img = mask_with_color(out_img, pedestrian_mask, color=(255, 0, 0))

        # Write output
        if args.test_output_folder is not None:
            cv2.imwrite(
                os.path.join(args.test_output_folder,
                             str(count) + ".png"), out_img)

        count += 1
        cv2.imshow("out_img", out_img)
        cv2.waitKey(1)
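The per-channel subtraction repeated in examples #5 and #6 uses the standard ImageNet mean pixel values in BGR order (103.939, 116.779, 123.68) followed by a BGR-to-RGB flip, mirroring the preprocessing of the training samples in ./src/data/data_utils/data_loader. Factored out into a helper (the function name here is ours, not the repository's), the step looks like this:

import numpy as np

# ImageNet mean pixel values in BGR order, as used with Caffe-style VGG weights.
BGR_MEANS = (103.939, 116.779, 123.68)

def preprocess_frame(raw):
    """Mean-subtract a BGR uint8 frame, flip it to RGB and add a batch axis."""
    img = raw.astype(np.float32)
    for channel, mean in enumerate(BGR_MEANS):
        img[:, :, channel] -= mean
    img = img[:, :, ::-1]               # BGR -> RGB
    return np.expand_dims(img, axis=0)  # shape: (1, H, W, 3)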
Example #7
mkdir(train_vis_dir)
mkdir(valid_vis_dir)
mkdir(test_vis_dir)

config_path = './config.json'
with open(config_path) as config_buffer:
    config = json.loads(config_buffer.read())
# parse the json to retrieve the training configuration
backend = config["model"]["backend"]
input_size = (config["model"]["im_width"], config["model"]["im_height"])
classes = config["model"]["classes"]
train_data_dir = config["train"]["data_directory"]
valid_data_dir = config["valid"]["data_directory"]

# define the model and train
segment = Segment(backend, input_size, classes)

threshold = 0.5

img_vis_dir_list = ['train_visual/', 'valid_visual/', 'test_visual/']
for img_vis_dir in img_vis_dir_list:
    if 'train' in img_vis_dir:
        img_dir = train_img_dir
    elif 'valid' in img_vis_dir:
        img_dir = valid_img_dir
    elif 'test' in img_vis_dir:
        img_dir = test_img_dir
    else:
        img_dir = ''

    img_name_list = [f for f in listdir(img_dir) if f.endswith('.png')]