Example #1
    def load_model(self, use_onnx, half_precision, cfg_path, weights_path):
        """
        Load model weights into device memory.
        """
        # Build the Darknet graph from the .cfg file and restore the checkpoint
        # onto the target device in evaluation mode.
        model = Darknet(cfg_path, self.target_size)
        model.load_state_dict(
            torch.load(weights_path, map_location=self.device)['model'])
        model.to(self.device).eval()

        if use_onnx:
            # Fuse Conv+BatchNorm layers, then trace the model with a zero-filled
            # dummy input of the configured size and export it to ONNX.
            model.fuse()
            img = torch.zeros((1, 3) + self.target_size)
            torch.onnx.export(model,
                              img,
                              'weights/export.onnx',
                              verbose=False,
                              opset_version=10)

            # Reload the exported graph and validate it. Note that on this path
            # the ONNX ModelProto, not the PyTorch module, is what gets returned.
            import onnx
            model = onnx.load('weights/export.onnx')
            onnx.checker.check_model(model)

        # Half precision is only applied when running on GPU.
        if half_precision and self.device.type != 'cpu':
            model.half()

        # Let cuDNN benchmark convolution kernels for the fixed input size.
        torch.backends.cudnn.benchmark = True
        return model
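The ONNX branch above only exports and validates weights/export.onnx. As a quick sanity check, here is a hedged sketch of running that file with ONNX Runtime; the onnxruntime dependency, the 416x416 input size, and the zero-filled dummy image are assumptions, not part of the original example.

# Hedged sketch: run the exported graph with ONNX Runtime.
# Assumes target_size was (416, 416) and that onnxruntime is installed.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('weights/export.onnx')
input_name = session.get_inputs()[0].name            # input name assigned by the exporter
dummy = np.zeros((1, 3, 416, 416), dtype=np.float32)  # placeholder image batch
outputs = session.run(None, {input_name: dummy})      # list of output arrays
print([o.shape for o in outputs])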
Example #2
import argparse
import os
import struct

import torch

# Adjust these two imports to your repository layout; the module paths are assumptions.
from models import Darknet
from utils.torch_utils import select_device


def parse_args():
    # The parser setup is reconstructed to make the snippet runnable;
    # the flag names and help texts are assumptions.
    parser = argparse.ArgumentParser(description="Convert a .pt checkpoint to a .wts file")
    parser.add_argument("-w", "--weights", required=True, help="path to the .pt weights file")
    parser.add_argument("-c", "--cfg", required=True, help="path to the Darknet .cfg file")
    args = parser.parse_args()
    if not os.path.isfile(args.weights):
        raise SystemExit("Invalid weights file")
    if not os.path.isfile(args.cfg):
        raise SystemExit("Invalid cfg file")
    return args.weights, args.cfg


pt_file, cfg_file = parse_args()

# The .wts file is written next to the .pt file, with the extension swapped.
wts_file = pt_file.split(".pt")[0] + ".wts"

# The conversion runs entirely on CPU; no GPU is required.
device = select_device("cpu")
model = Darknet(cfg_file).to(device)
model.load_state_dict(torch.load(pt_file, map_location=device)["model"])
model.to(device).eval()

with open(wts_file, "w") as f:
    wts_write = ""
    conv_count = 0  # counts every exported tensor, not only convolutions
    for k, v in model.state_dict().items():
        # Skip BatchNorm bookkeeping tensors; they carry no weights.
        if "num_batches_tracked" not in k:
            vr = v.reshape(-1).cpu().numpy()
            # Each line: "<tensor name> <element count>" followed by the values
            # encoded as big-endian float32 hex strings.
            wts_write += "{} {} ".format(k, len(vr))
            for vv in vr:
                wts_write += " "
                wts_write += struct.pack(">f", float(vv)).hex()
            wts_write += "\n"
            conv_count += 1
    # The first line of the .wts file records the number of weight entries.
    f.write("{}\n".format(conv_count))
    f.write(wts_write)
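To make the .wts layout concrete, here is a hedged sketch of a reader that simply reverses the writer above. The load_wts helper and the example file name are illustrative only; in a typical TensorRT workflow the file is parsed by the engine-building code instead.

import struct

import numpy as np


def load_wts(path):
    """Parse a .wts file produced by the script above into a name -> float32 array dict."""
    weights = {}
    with open(path, "r") as f:
        count = int(f.readline())          # first line: number of weight entries
        for _ in range(count):
            parts = f.readline().split()
            name, length = parts[0], int(parts[1])
            # Remaining tokens are big-endian float32 values encoded as hex.
            values = [struct.unpack(">f", bytes.fromhex(h))[0] for h in parts[2:2 + length]]
            weights[name] = np.asarray(values, dtype=np.float32)
    return weights


# Example: weights = load_wts("yolov3.wts")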