Example 1
import time

import numpy as np
import torch
from torch.utils.data import DataLoader

# device, optimizer, criterion and the project helpers (PriorBox,
# VOCDetection, detection_collate, adjust_learning_rate) are assumed to be
# defined at module scope, as in the original training script.

def train(net):
    net.train()

    # Anchor priors are fixed for a given input size, so compute them once.
    priorbox = PriorBox()
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.to(device)

    dataloader = DataLoader(VOCDetection(),
                            batch_size=2,
                            collate_fn=detection_collate,
                            num_workers=12)

    for epoch in range(1000):
        loss_ls, loss_cs = [], []
        load_t0 = time.time()

        # Drop the learning rate to 1e-4 for the second half of training.
        if epoch > 500:
            adjust_learning_rate(optimizer, 1e-4)

        for images, targets in dataloader:
            images = images.to(device)
            targets = [anno.to(device) for anno in targets]
            out = net(images)
            optimizer.zero_grad()
            loss_l, loss_c = criterion(out, priors, targets)

            # Weight the localization loss 2x relative to classification.
            loss = 2 * loss_l + loss_c
            loss.backward()
            optimizer.step()
            loss_cs.append(loss_c.item())
            loss_ls.append(loss_l.item())
        load_t1 = time.time()

        print(f'loss_c: {np.mean(loss_cs)}, loss_l: {np.mean(loss_ls)}, '
              f'time: {load_t1 - load_t0}')

        # The same checkpoint file is overwritten at the end of every epoch.
        torch.save(net.state_dict(), 'Final_FaceBoxes.pth')
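train() relies on names that live at module scope. A minimal sketch of that setup, assuming SGD and a MultiBoxLoss-style criterion; the FaceBoxes and MultiBoxLoss constructors below are illustrative placeholders, not confirmed by the snippet:

    import torch
    import torch.optim as optim

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = FaceBoxes(num_classes=2).to(device)      # hypothetical model class
    optimizer = optim.SGD(net.parameters(), lr=1e-3,
                          momentum=0.9, weight_decay=5e-4)
    criterion = MultiBoxLoss(2)                    # hypothetical loss class

    def adjust_learning_rate(optimizer, lr):
        # Apply the new learning rate to every parameter group.
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    train(net)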
Example 2
    def init_priors(self, feature_maps, image_size):
        # Build a cache key from the feature-map shapes and the image size,
        # so priors are computed only once per input configuration.
        # Hacky key system, but works....
        key = ".".join([str(item) for i in range(len(feature_maps)) for item in feature_maps[i]]) + \
              "," + ".".join([str(_) for _ in image_size])
        if key in self.prior_cache:
            return self.prior_cache[key].clone()

        priorbox = PriorBox(self.cfg, image_size, feature_maps)
        prior = priorbox.forward()
        self.prior_cache[key] = prior.clone()
        return prior
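Since nested tuples of ints are hashable, a tuple key avoids the string building and any ambiguity from the "." separator. A sketch of the equivalent lookup, assuming the same self.prior_cache dict and PriorBox signature:

    def init_priors(self, feature_maps, image_size):
        # Tuples are hashable, so they can key the cache directly.
        key = (tuple(tuple(fm) for fm in feature_maps), tuple(image_size))
        if key in self.prior_cache:
            return self.prior_cache[key].clone()

        priorbox = PriorBox(self.cfg, image_size, feature_maps)
        prior = priorbox.forward()
        self.prior_cache[key] = prior.clone()
        return prior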
Example 3
class SSD(nn.Module):
    """Single Shot Multibox Architecture
    The network is composed of a base VGG network followed by the
    added multibox conv layers.  Each multibox layer branches into
        1) conv2d for class conf scores
        2) conv2d for localization predictions
        3) associated priorbox layer to produce default bounding
           boxes specific to the layer's feature map size.
    See: https://arxiv.org/pdf/1512.02325.pdf for more details.

    Args:
        phase: (string) can be "test" or "train"
        size: input image size (e.g. 300 or 512)
        Basenet: backbone network producing the base feature maps
        Neck: optional feature-fusion module between backbone and head (may be None)
        Head: "multibox head" of loc and conf conv layers
        cfg: config dict providing num_classes, variance, nms_kind, beta1, ...
    """

    def __init__(self, phase, size, Basenet, Neck, Head, cfg):
        super(SSD, self).__init__()
        self.phase = phase
        self.cfg = cfg
        self.priorbox = PriorBox(self.cfg)
        self.priors = self.priorbox.forward()
        self.size = size
        # SSD network
        self.basenet = Basenet
        self.neck = Neck
        self.head = Head
        self.num_classes = cfg['num_classes']
        self.softmax = nn.Softmax(dim=-1)
        # Positional args: bkg_label=0, top_k=200, conf_thresh=0.01,
        # nms_thresh=0.45 (the usual ssd.pytorch Detect ordering).
        self.detect = Detect(self.num_classes, 0, 200, 0.01, 0.45,
                             variance=cfg['variance'], nms_kind=cfg['nms_kind'], beta1=cfg['beta1'])

    def forward(self, x, phase):
        """Applies network layers and ops on input image(s) x.

        Args:
            x: input image or batch of images. Shape: [batch,3,300,300].

        Return:
            Depending on phase:
            test:
                tensor of class label predictions, confidence scores, and
                corresponding location predictions for each detected object.
                Shape: [batch, topk, 7]

            train:
                tuple of:
                    1: localization preds, Shape: [batch, num_priors, 4]
                    2: confidence preds, Shape: [batch, num_priors, num_classes]
                    3: prior boxes, Shape: [num_priors, 4]
        """

        x = self.basenet(x)
        if self.neck is not None:
            x = self.neck(x)

        conf, loc = self.head(x)

        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if phase == "test":
            output = self.detect.trans(
                loc.view(loc.size(0), -1, 4),                   # loc preds
                self.softmax(conf.view(conf.size(0), -1,
                                       self.num_classes)),      # conf preds
                self.priors                                     # default boxes
            )
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
                self.priors
            )
        return output

    def load_weights(self, base_file):
        _, ext = os.path.splitext(base_file)
        # Note: `ext == '.pkl' or '.pth'` was always true, because the
        # non-empty string '.pth' is truthy; test membership instead.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file,
                                            map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry, only .pth and .pkl files are supported.')
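The branching described in the class docstring is easy to see in isolation. A self-contained sketch of one loc/conf branch pair over a single feature map, flattened the same way SSD.forward flattens its outputs; the channel, anchor, and class counts are illustrative, not taken from this repo:

    import torch
    import torch.nn as nn

    num_anchors, num_classes, in_ch = 4, 21, 256
    loc_layer = nn.Conv2d(in_ch, num_anchors * 4, kernel_size=3, padding=1)
    conf_layer = nn.Conv2d(in_ch, num_anchors * num_classes, kernel_size=3, padding=1)

    feat = torch.randn(2, in_ch, 38, 38)                # one feature map, batch of 2
    loc = loc_layer(feat).permute(0, 2, 3, 1).contiguous()
    conf = conf_layer(feat).permute(0, 2, 3, 1).contiguous()
    loc = loc.view(loc.size(0), -1)                     # [2, 38*38*4*4]
    conf = conf.view(conf.size(0), -1)                  # [2, 38*38*4*21]
    print(loc.shape, conf.shape)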
Example 4
    (show_classes, num_classes, dataset, epoch_size, max_iter, testset) = load_dataset()

    print('Loading Network...')
    from models.detector import Detector
    model = Detector(args.size, num_classes, args.backbone, args.neck)
    model.train()
    model.cuda()
    num_param = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Total trainable params: {:e}'.format(num_param))

    print('Preparing Optimizer & AnchorBoxes...')
    # tencent_trick: parameter groups that exempt biases and norm layers from weight decay
    optimizer = optim.SGD(tencent_trick(model), lr=args.lr, momentum=0.9, weight_decay=0.0005)
    criterion = MultiBoxLoss(num_classes, mutual_guide=args.mutual_guide)
    priorbox = PriorBox(args.base_anchor_size, args.size)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    if args.trained_model is not None:
        print('Loading weights from', args.trained_model)
        state_dict = torch.load(args.trained_model)
        model.load_state_dict(state_dict, strict=True)
    else:
        print('Training {}-{} on {} with {} images'.format(args.neck, args.backbone, dataset.name, len(dataset)))
        os.makedirs(args.save_folder, exist_ok=True)
        epoch = 0
        timer = Timer()
        for iteration in range(max_iter):
            if iteration % epoch_size == 0:

                # create batch iterator
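The snippet cuts off as it rebuilds the batch iterator at each epoch boundary. A common shape for that pattern (a sketch, not this repo's actual code; the DataLoader arguments are illustrative):

    if iteration % epoch_size == 0:
        # Recreate the iterator once per epoch so it can be re-consumed.
        batch_iterator = iter(DataLoader(dataset, args.batch_size,
                                         shuffle=True, num_workers=4,
                                         collate_fn=detection_collate))
        epoch += 1
    images, targets = next(batch_iterator)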