예제 #1
0
 def __init__(self, cfg, weight, batchSize=1):
     """Build the Darknet detector, report its cost stats, and set it to eval.

     Args:
         cfg: path to the Darknet network-definition (.cfg) file.
         weight: path to the pretrained Darknet weights file.
         batchSize: number of images processed per detection batch.

     Raises:
         ValueError: if the configured input size is not a multiple of 32
             strictly greater than 32.
     """
     self.det_model = Darknet(cfg)
     self.det_model.load_weights(weight)
     # NOTE(review): `input_size`, `device` and `libtorch` are read from the
     # enclosing module's scope -- confirm they are defined there.
     self.det_model.net_info['height'] = input_size
     self.det_inp_dim = int(self.det_model.net_info['height'])
     # Darknet downsamples by a factor of 32, so the input side must be a
     # multiple of 32 and > 32.  Use an explicit raise rather than `assert`,
     # which is silently stripped under `python -O`.
     if self.det_inp_dim % 32 != 0 or self.det_inp_dim <= 32:
         raise ValueError(
             "input size must be a multiple of 32 and > 32, got {}".format(
                 self.det_inp_dim))
     if device != "cpu":
         self.det_model.cuda()
     # Report inference time, parameter count and FLOPs for this model.
     inf_time = get_inference_time(self.det_model,
                                   height=input_size,
                                   width=input_size)
     flops = print_model_param_flops(self.det_model,
                                     input_width=input_size,
                                     input_height=input_size)
     params = print_model_param_nums(self.det_model)
     print("Detection: Inference time {}s, Params {}, FLOPs {}".format(
         inf_time, params, flops))
     if libtorch:
         # Export a TorchScript trace of the detector for libtorch use.
         # NOTE(review): tracing uses a fixed 2x3x224x224 example even though
         # the network height is `input_size` -- confirm this is intended.
         example = torch.rand(2, 3, 224, 224)
         traced_model = torch.jit.trace(self.det_model, example)
         traced_model.save("det_lib.pt")
     self.det_model.eval()
     self.im_dim_list = []
     self.batchSize = batchSize
     self.mul_img = False
예제 #2
0
    def __init__(self, batchSize=1):
        """Load the YOLOv3-SPP detector from fixed cfg/weight paths on GPU.

        Args:
            batchSize: number of images processed per detection batch.

        Raises:
            ValueError: if `config.input_size` is not a multiple of 32
                strictly greater than 32.
        """
        self.det_model = Darknet("src/yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = config.input_size
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # Darknet downsamples by a factor of 32, so the input side must be a
        # multiple of 32 and > 32.  Use an explicit raise rather than
        # `assert`, which is silently stripped under `python -O`.
        if self.det_inp_dim % 32 != 0 or self.det_inp_dim <= 32:
            raise ValueError(
                "input size must be a multiple of 32 and > 32, got {}".format(
                    self.det_inp_dim))
        self.det_model.cuda()  # assumes CUDA is available; no CPU fallback
        self.det_model.eval()

        # Worker-loop flag; presumably flipped by a stop() method elsewhere.
        self.stopped = False
        self.batchSize = batchSize
예제 #3
0
    def __init__(self, batchSize=1):
        """Load the YOLO detector from paths given by the `config` module.

        Args:
            batchSize: number of images processed per detection batch.

        Raises:
            ValueError: if `config.input_size` is not a multiple of 32
                strictly greater than 32.
        """
        self.det_model = Darknet(config.yolo_cfg)
        self.det_model.load_weights(config.yolo_model)
        self.det_model.net_info['height'] = config.input_size
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # Darknet downsamples by a factor of 32, so the input side must be a
        # multiple of 32 and > 32.  Use an explicit raise rather than
        # `assert`, which is silently stripped under `python -O`.
        if self.det_inp_dim % 32 != 0 or self.det_inp_dim <= 32:
            raise ValueError(
                "input size must be a multiple of 32 and > 32, got {}".format(
                    self.det_inp_dim))
        # NOTE(review): `device` is read from the enclosing module's scope.
        if device != "cpu":
            self.det_model.cuda()
        self.det_model.eval()

        # Worker-loop flag; presumably flipped by a stop() method elsewhere.
        self.stopped = False
        self.batchSize = batchSize
예제 #4
0
    def __init__(self, cfg, weight, batchSize=1):
        """Build the Darknet detector, print its cost stats, and set it to eval.

        Args:
            cfg: path to the Darknet network-definition (.cfg) file.
            weight: path to the pretrained Darknet weights file.
            batchSize: number of images processed per detection batch.

        Raises:
            ValueError: if `opt.input_size` is not a multiple of 32
                strictly greater than 32.
        """
        self.det_model = Darknet(cfg)
        self.det_model.load_weights(weight)
        self.det_model.net_info['height'] = opt.input_size
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # Darknet downsamples by a factor of 32, so the input side must be a
        # multiple of 32 and > 32.  Use an explicit raise rather than
        # `assert`, which is silently stripped under `python -O`.
        if self.det_inp_dim % 32 != 0 or self.det_inp_dim <= 32:
            raise ValueError(
                "input size must be a multiple of 32 and > 32, got {}".format(
                    self.det_inp_dim))
        # NOTE(review): `device` is read from the enclosing module's scope.
        if device != "cpu":
            self.det_model.cuda()
        # Report inference time, parameter count and FLOPs for this model.
        inf_time = get_inference_time(self.det_model, height=opt.input_size, width=opt.input_size)
        flops = print_model_param_flops(self.det_model, input_width=opt.input_size, input_height=opt.input_size)
        params = print_model_param_nums(self.det_model)
        print("Detection: Inference time {}s, Params {}, FLOPs {}".format(inf_time, params, flops))
        self.det_model.eval()

        self.im_dim_list = []
        self.batchSize = batchSize
예제 #5
0
    def __init__(self, cfg, weight, batchSize=1):
        """Build the Darknet detector, print its cost stats, and set it to eval.

        Args:
            cfg: path to the Darknet network-definition (.cfg) file.
            weight: path to the pretrained Darknet weights file.
            batchSize: number of images processed per detection batch.

        Raises:
            ValueError: if `config.input_size` is not a multiple of 32
                strictly greater than 32.
        """
        self.det_model = Darknet(cfg)
        self.det_model.load_weights(weight)
        self.det_model.net_info['height'] = config.input_size
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # Darknet downsamples by a factor of 32, so the input side must be a
        # multiple of 32 and > 32.  Use an explicit raise rather than
        # `assert`, which is silently stripped under `python -O`.
        if self.det_inp_dim % 32 != 0 or self.det_inp_dim <= 32:
            raise ValueError(
                "input size must be a multiple of 32 and > 32, got {}".format(
                    self.det_inp_dim))
        # NOTE(review): `device` is read from the enclosing module's scope.
        if device != "cpu":
            self.det_model.cuda()
        # Report inference time, parameter count and FLOPs for this model.
        inf_time = get_inference_time(self.det_model,
                                      height=config.input_size,
                                      width=config.input_size)
        flops = print_model_param_flops(self.det_model,
                                        input_width=config.input_size,
                                        input_height=config.input_size)
        params = print_model_param_nums(self.det_model)
        print("Detection: Inference time {}s, Params {}, FLOPs {}".format(
            inf_time, params, flops))
        self.det_model.eval()

        self.im_dim_list = []
        self.batchSize = batchSize