Example #1
def get_arguments():
    args = parse_args(parameters, "trainer", "object detection parameters")

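    # parse_choice presumably validates each value against the parameter's declared choices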
    args.model = parse_choice("model", parameters.model, args.model)
    args.input = parse_choice("input", parameters.input, args.input)

    return args
Example #2
def main():
    args = parse_args(parameters, "video detection",
                      "video evaluation parameters")
    print(args)

    model, encoder, model_args = load_model(args.model)
    print("model parameters:")
    print(model_args)

    classes = model_args.dataset.classes

    frames, info = cv.video_capture(args.input)
    print("Input video", info)

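    # Fall back to no scaling (factor 1) when no scale argument was supplied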
    scale = args.scale or 1
    size = (int(info.size[0] * scale), int(info.size[1] * scale))

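    # initialise presumably wraps the model into a per-image evaluation function for the chosen backend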
    evaluate_image = initialise(args.model, model, encoder, size, args.backend)
    evaluate_video(frames,
                   evaluate_image,
                   size,
                   args,
                   classes=classes,
                   fps=info.fps)
Example #3
    return image, depth


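# Filter for find_files below: keep an image only when a matching ".depth" file exists beside it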
def image_with_depth(filename):
    base, ext = path.splitext(filename)
    depth = base + ".depth"

    if flat.image_file(filename) and path.exists(depth):
        return filename


def display_image_depth(image, depth):
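    # Shift the depth map so its minimum is zero, then scale it into the 0-255 byte range for display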
    depth.add_(-depth.min())
    depth = depth.div_(1000 / 256).clamp(max=255).byte()

    depth = cv.gray_to_rgb(depth)

    cv.display(torch.cat([image, depth], 1))


parameters = struct(input=param('', required=True, help="input folder"))

args = parse_args(parameters, "display depth", "")
image_files = flat.find_files(args.input, image_with_depth)

for file in image_files:
    print(file)
    image, depth = load_image_with_depth(file)

    display_image_depth(image, depth)
Example #4
import torch

from tools import struct, pprint_struct
from tools.parameters import param, parse_args

from tools.image import cv
from main import load_model

from evaluate import evaluate_image
from detection import box, display, detection_table

parameters = struct(model=param('',
                                required=True,
                                help="model checkpoint to use for detection"),
                    input=param('', required=True, help="input image"),
                    threshold=param(0.5, "detection threshold"))

args = parse_args(parameters, "image evaluation",
                  "image evaluation parameters")
device = torch.cuda.current_device()

model, encoder, model_args = load_model(args.model)
print("model parameters:")
pprint_struct(model_args)

classes = model_args.dataset.classes

model.to(device)
encoder.to(device)

frame = cv.imread_color(args.input)

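# Extend the default non-maximum suppression settings with the user-supplied threshold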
nms_params = detection_table.nms_defaults._extend(nms=args.threshold)
pprint_struct(nms_params)
Example #5
                          encoder.debug_keys[debug_index - 1])
                else:
                    print("hiding debug")

        if key == 27:  # 27 = Escape key
            break


if __name__ == '__main__':
    device = torch.cuda.current_device()

    input_parameters = make_input_parameters()
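    # Merge the separate parameter groups into a single flat specification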
    parameters = detection_parameters._merge(train_parameters)._merge(
        vis_parameters)._merge(input_parameters)._merge(debug_parameters)

    args = parse_args(parameters, "Visualise", "")

    args.model = parse_choice("model", parameters.model, args.model)
    args.input = parse_choice("input", parameters.input, args.input)

    args.dry_run = True
    args.no_load = False

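    # Seed Python's and PyTorch's RNGs so visualisation runs are reproducible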
    random.seed(args.seed)
    torch.manual_seed(args.seed)

    pp.pprint(args._to_dicts())

    config, dataset = load_dataset(args)
    env = main.initialise(config, dataset, args)
Example #6
from time import time
import json

import torch

from tools import struct
from tools.parameters import param, parse_args
from tools.image import cv
from main import load_model

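# Each param bundles a default value, an optional type, and help text for the generated command-line interface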
parameters = struct(model=param('',
                                required=True,
                                help="model checkpoint to use for detection"),
                    input=param('',
                                required=True,
                                help="input video sequence for detection"),
                    scale=param(None, type='float', help="scaling of input"),
                    tensorrt=param(False, help='optimize model with tensorrt'),
                    frames=param(256, help="number of frames to use"),
                    threshold=param(0.3, "detection threshold"),
                    batch=param(8, "batch size for faster evaluation"))

args = parse_args(parameters, "model benchmark", "parameters")
print(args)
device = torch.cuda.current_device()

model, encoder, model_args = load_model(args.model)
print("model parameters:")
print(model_args)

classes = model_args.dataset.classes

model.to(device)
encoder.to(device)

frames, info = cv.video_capture(args.input)
print(info)
Example #7
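    # Reload the exported ONNX model and verify that it is structurally valid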
    onnx_model = onnx.load(filename)
    onnx.checker.check_model(onnx_model)

    # graph = onnx.helper.printable_graph(model.graph)
    # print(graph)

    # inferred_model = shape_inference.infer_shapes(model)
    # onnx.checker.check_model(inferred_model)

    # print(model.graph.value_info, inferred_model.graph.value_info)

    # all_passes = optimizer.get_available_passes()
    # optimized = optimizer.optimize(model, all_passes)

    # graph = onnx.helper.printable_graph(model.graph)
    # print(graph)


if __name__ == '__main__':
    args = parse_args(parameters, "export model", "export parameters")
    print(args)
    device = torch.cuda.current_device()
    # device = torch.device('cpu')

    model, encoder, model_args = load_model(args.model)
    print("model parameters:")
    print(model_args)

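    # args.size is presumably a "WIDTHxHEIGHT" string, e.g. "640x480"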
    size = args.size.split("x")