Example #1
def train(net):
    net.train()
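    # The prior (anchor) boxes depend only on the fixed input size, so they
    # are generated once up front and excluded from autograd.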
    priorbox = PriorBox()
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.to(device)

    dataloader = DataLoader(VOCDetection(),
                            batch_size=2,
                            collate_fn=detection_collate,
                            num_workers=12)
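    # detection_collate keeps the per-image annotations as a list of tensors
    # (the number of boxes varies per image), which is why the loop below
    # moves each annotation to the device individually.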

    for epoch in range(1000):
        loss_ls, loss_cs = [], []
        load_t0 = time.time()
        if epoch > 500:
            adjust_learning_rate(optimizer, 1e-4)

        for images, targets in dataloader:
            images = images.to(device)
            targets = [anno.to(device) for anno in targets]
            out = net(images)
            optimizer.zero_grad()
            loss_l, loss_c = criterion(out, priors, targets)

            loss = 2 * loss_l + loss_c
            loss.backward()
            optimizer.step()
            loss_cs.append(loss_c.item())
            loss_ls.append(loss_l.item())
        load_t1 = time.time()

        print(f'loss_c: {np.mean(loss_cs):.4f}  loss_l: {np.mean(loss_ls):.4f}  time: {load_t1 - load_t0:.2f}s')
        torch.save(net.state_dict(), 'Final_FaceBoxes.pth')
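adjust_learning_rate is called above but not defined in this snippet. A minimal sketch of such a helper, assuming it simply overwrites the learning rate of every parameter group (the real helper may implement a decay schedule instead):

def adjust_learning_rate(optimizer, new_lr):
    # Apply the same learning rate to every parameter group of the optimizer.
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr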
Example #2
    def init_priors(self, feature_maps, image_size):

        # Hacky key system, but works....
        key = ".".join([str(item) for i in range(len(feature_maps)) for item in feature_maps[i]]) + \
              "," + ".".join([str(_) for _ in image_size])
        if key in self.prior_cache:
            return self.prior_cache[key].clone()

        priorbox = PriorBox(self.cfg, image_size, feature_maps)
        prior = priorbox.forward()
        self.prior_cache[key] = prior.clone()
        return prior
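For reference, the cache key simply flattens the feature-map sizes and appends the image size. With illustrative values (not taken from any real configuration):

feature_maps = [[80, 80], [40, 40]]   # illustrative only
image_size = (640, 640)
key = ".".join(str(item) for fm in feature_maps for item in fm) + \
      "," + ".".join(str(s) for s in image_size)
print(key)  # 80.80.40.40,640.640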
Example #3
    def __init__(self, confidence_threshold=0.02, top_k=1000, nms_threshold=0.4, keep_top_k=500, vis_thres=0.6):
        self.net = Retina(cfg=cfg_mnet).to(device)
        self.net = load_model(self.net, 'mnet_plate.pth', False)
        self.net.eval()
        self.lprnet = plate_recogition()
        self.priorbox = PriorBox(cfg_mnet)
        self.confidence_threshold = confidence_threshold
        self.top_k = top_k
        self.nms_threshold = nms_threshold
        self.keep_top_k = keep_top_k
        self.vis_thres = vis_thres
        self.resize = 1
        self.points_ref = np.float32([[0, 0], [94, 0], [0, 24], [94, 24]])
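points_ref lists the corners of a 94x24 canvas, which suggests detected plate corners are later warped onto it before recognition. A minimal sketch of that rectification step; the helper name and corner ordering are assumptions, not part of the original code:

import cv2
import numpy as np

def rectify_plate(image, corners, points_ref):
    # corners: 4x2 float32 array of detected plate corners, ordered like
    # points_ref (top-left, top-right, bottom-left, bottom-right).
    M = cv2.getPerspectiveTransform(np.float32(corners), points_ref)
    return cv2.warpPerspective(image, M, (94, 24))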
Example #4
    def __init__(self, phase, size, Backbone, Neck, Head, cfg):
        super(SSD, self).__init__()
        self.phase = phase
        self.cfg = cfg
        self.priorbox = PriorBox(self.cfg)
        self.priors = self.priorbox.forward()
        self.size = size
        # SSD network
        self.backbone = Backbone
        self.neck = Neck
        self.head = Head
        self.num_classes = cfg['num_classes']
        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect(self.num_classes, 0, 200, 0.01, 0.45,
                             variance=cfg['variance'], nms_kind=cfg['nms_kind'], beta1=cfg['beta1'])
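        # The positional arguments to Detect presumably follow the common
        # ssd.pytorch layout: (num_classes, bkg_label, top_k, conf_thresh,
        # nms_thresh); variance, nms_kind and beta1 parameterize the NMS
        # variant used by this repository.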
Example #5
    def __init__(self, model_path, gpu_ids, layers, score_thresh=0.5):
        """
        检测整体基本流程
        :param model_path: 模型路径
        :param gpu_ids: gpu序列号
        :param layers: 18 , 50
        :param score_thresh: 置信度过滤
        """
        self.keep_top_k = 100
        self.nms_threshold = 0.3
        self.nms_score = score_thresh
        self.nms_threshold = self.nms_threshold

        self.test_size = 640
        self.__model_path = model_path
        self.__gpu_ids = gpu_ids

        self.device = torch.device('cuda:{}'.format(str(gpu_ids))
                                   if torch.cuda.is_available() else 'cpu')

        self.layers = layers
        self.model = RetinaFace(self.layers)
        self.model = self.__load_model(self.model, self.__model_path)
        self.model = self.model.to(self.device)
        self.model.eval()

        self.priorbox = PriorBox(box_specs_list=[[(0.6, 0.5), (0.75, 1.),
                                                  (0.9, 1.)],
                                                 [(0.2, 0.5), (0.4, 1.),
                                                  (0.6, 1.)],
                                                 [(0.05, 0.5), (0.1, 1.),
                                                  (0.2, 1.)],
                                                 [(0.0125, 0.5), (0.025, 1.),
                                                  (0.05, 1.)]],
                                 base_anchor_size=[1.0, 1.0])
        self.priors = self.priorbox.generate(feature_map_shape_list=[(10, 10),
                                                                     (20, 20),
                                                                     (40, 40),
                                                                     (80, 80)],
                                             im_height=640,
                                             im_width=640)
        self.priors = self.priors.to(self.device)

        self.mean = torch.Tensor([104, 117, 123]).to(self.device)
        self.variance = torch.Tensor([0.1, 0.2]).to(self.device)
        self.Decode = Decode(self.priors.data, self.variance)
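        # With three (scale, aspect_ratio) specs per level and 10x10, 20x20,
        # 40x40, 80x80 feature maps, generate() should yield
        # 3 * (100 + 400 + 1600 + 6400) = 25,500 priors for the 640x640 input;
        # Decode pairs them with the (0.1, 0.2) variances to map regression
        # offsets back to boxes.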
Example #6
    print('Loading Dataset...')
    (show_classes, num_classes, dataset, epoch_size, max_iter, testset) = load_dataset()

    print('Loading Network...')
    from models.detector import Detector
    model = Detector(args.size, num_classes, args.backbone, args.neck)
    model.train()
    model.cuda()
    num_param = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Total trainable params: {:e}'.format(num_param))

    print('Preparing Optimizer & AnchorBoxes...')
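    # tencent_trick presumably splits the model parameters into groups so that
    # biases and normalization parameters are exempt from weight decay.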
    optimizer = optim.SGD(tencent_trick(model), lr=args.lr, momentum=0.9, weight_decay=0.0005)
    criterion = MultiBoxLoss(num_classes, mutual_guide=args.mutual_guide)
    priorbox = PriorBox(args.base_anchor_size, args.size)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    if args.trained_model is not None:
        print('loading weights from', args.trained_model)
        state_dict = torch.load(args.trained_model)
        model.load_state_dict(state_dict, strict=True)
    else:
        print('Training {}-{} on {} with {} images'.format(args.neck, args.backbone, dataset.name, len(dataset)))
        os.makedirs(args.save_folder, exist_ok=True)
        epoch = 0
        timer = Timer()
        for iteration in range(max_iter):
            if iteration % epoch_size == 0:
Example #7
                "output3": {0: "batch_size"}}
torch.onnx.export(net, dummy_input, onnx_output, verbose=True,
                  input_names=input_names,
                  output_names=output_names,
                  opset_version=12,
                  dynamic_axes=dynamic_axes)
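# dynamic_axes marks dim 0 of the listed inputs/outputs as "batch_size",
# so the exported graph accepts a variable batch size at inference time.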
if False:
    model = onnx.load(onnx_output)
    model_simp, check = simplify(model)
    assert check, "Simplified ONNX model could not be validated"
    output_path = 'simp.onnx'
    onnx.save(model_simp, output_path)
    print('finished exporting onnx ')

img_path = 'export/028125-87_110-204&496_524&585-506&564_204&585_210&514_524&496-0_0_5_24_29_33_24_24-52-45.jpg'
priorbox = PriorBox(cfg_mnet)
points_ref = np.float32([[0, 0], [94, 0], [0, 24], [94, 24]])
confidence_threshold = 0.02
top_k = 1000
nms_threshold = 0.4
keep_top_k = 500
vis_thres = 0.6

srcimg = cv2.imread(img_path)
img = srcimg.astype('float32')
im_height, im_width, _ = img.shape
img -= (104, 117, 123)
with torch.no_grad():
    scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]).to(device)
    img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).to(device)
    loc, conf, landms = net(img)  # forward pass
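    # loc, conf and landms are presumably decoded against the PriorBox output
    # and then filtered using confidence_threshold, nms_threshold, top_k and
    # keep_top_k; that post-processing is not part of this snippet.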