Esempio n. 1
0
# Training hyper-parameters (`args` is parsed earlier in this script).
p = 0.4                  # data-augmentation probability passed to preproc
num_classes = 2
batch_size = args.batch_size
weight_decay = 0.0005
gamma = 0.1              # learning-rate decay factor
momentum = 0.9

#### tensorboardX visualization ####
# One run directory per launch, timestamped for uniqueness.
log_path = os.path.join('runs', datetime.now().isoformat())
# exist_ok avoids the check-then-create race of `os.path.exists` + `makedirs`.
os.makedirs(log_path, exist_ok=True)
writer = SummaryWriter(log_dir=log_path)

net = build_net('train', num_classes)  # build the RFB network in training mode
print(net)
if args.resume_net == None:  # args.basenet = './weights/vgg16_reducedfc.pth'

    def xavier(param):
        """Fill ``param`` in-place with Xavier-uniform values."""
        # init.xavier_uniform is deprecated in modern PyTorch; the
        # trailing-underscore variant is the supported in-place API.
        init.xavier_uniform_(param)

    def weights_init(m):
        for key in m.state_dict():  # 新增层参数的初始化
            if key.split('.')[-1] == 'weight':
                if 'conv' in key:
                    init.kaiming_normal_(m.state_dict()[key],
                                         mode='fan_out')  # conv层参数的初始化
                if 'bn' in key:
                    m.state_dict()[key][...] = 1
            elif key.split('.')[-1] == 'bias':  # bias初始化为0
Esempio n. 2
0
elif args.version == 'RFB_mobile':
    from models.RFB_Net_mobile import build_net
    cfg = COCO_mobile_300
else:
    print('Unkown version!')

# Hyper-parameters resolved from the parsed command-line arguments.
img_dim = 512 if args.size == '512' else 300
rgb_means = (103.94, 116.78, 123.68) if args.version == 'RFB_mobile' else (104, 117, 123)
p = 0.2 if args.version == 'RFB_mobile' else 0.6
num_classes = 81 if args.dataset == 'COCO' else 21
batch_size = args.batch_size
weight_decay = 0.0005
gamma = 0.1
momentum = 0.9

# Build the RFB detector in training mode.
net = build_net('train', img_dim, num_classes)
print(net)
if args.resume_net == None:
    base_weights = torch.load(args.basenet)
    print('Loading base network...')
    net.base.load_state_dict(base_weights)

    def xavier(param):
        """Fill ``param`` in-place with Xavier-uniform values."""
        # init.xavier_uniform is deprecated in modern PyTorch; the
        # trailing-underscore variant is the supported in-place API.
        init.xavier_uniform_(param)

    def weights_init(m):
        for key in m.state_dict():
            if key.split('.')[-1] == 'weight':
                if 'conv' in key:
                    init.kaiming_normal(m.state_dict()[key], mode='fan_out')
                if 'bn' in key:
Esempio n. 3
0
                .format(i + 1, num_images, detect_time, nms_time))
            _t['im_detect'].clear()
            _t['misc'].clear()

    with open(det_file, 'wb') as f: # 保存整理的缓存结果
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder) # 评估函数,可以结合voc_eval.py看看


if __name__ == '__main__':
    # load net
    img_dim = (300,512)[args.size=='512']
    num_classes = (21, 81)[args.dataset == 'COCO']
    net = build_net('test', img_dim, num_classes) # initialize detector: builds the network structure only, no weights loaded yet
    state_dict = torch.load(args.trained_model) # load the trained checkpoint from disk
    # create new OrderedDict that does not contain `module.`

    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:] # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict) # copy the loaded parameters into net
    net.eval() # parameters are in place; switch to evaluation mode
    print('Finished loading model!')
Esempio n. 4
0
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot


if __name__ == '__main__':
    # Build the detector in test mode and render its autograd graph.
    img_size = 300
    num_classes = 21
    net = build_net('test', img_size, num_classes)
    net.eval()
    print(net)

    # torch.autograd.Variable is deprecated; plain tensors carry autograd
    # state themselves, so no wrapper is needed.
    x = torch.randn(1, 3, 300, 300)
    y = net(x)
    g = make_dot(y[0])  # graph of the first network output
    g.view()
Esempio n. 5
0
        # match priors with gt
        for idx in range(num):  # batch_size
            truths = targets[idx][:, :-2].data  # [obj_num, 4]
            labels = targets[idx][:, -2:].data  # [obj_num]
            defaults = priors.data  # [num_priors,4]
            match(overlap_threshold, truths, defaults, [0.1, 0.2], labels, loc_t, conf_t, obj_t, idx)

        conf_data_list = [conf_data[conf_t[:, :, 0] == i] for i in range(1, num_classes)]
        cls_list = [torch.cat((cls_list[i], conf_data_list[i]), 0) for i in range(num_classes-1)]
    cls_list = [(item / item.norm(dim=1, keepdim=True)).mean(0) for item in cls_list]
    if args.setting == 'incre':
        cls_list = cls_list[15:]
    if isinstance(model, (DistributedDataParallel, DataParallel)):
        model.module.OBJ_Target.weight.data = torch.stack([item / item.norm() for item in cls_list], 0)
    else:
        model.OBJ_Target.weight.data = torch.stack([item / item.norm() for item in cls_list], 0)


if __name__ == '__main__':
    # Build the detector, place it on the chosen device, then start training.
    model = build_net(args, img_dim, src_cls_dim)
    logger.info("Model:\n{}".format(model))
    use_gpu = args.cuda and torch.cuda.is_available()
    if not use_gpu:
        model.device = 'cpu'
    else:
        model.device = 'cuda'
        model.cuda()
        cudnn.benchmark = True
        if args.ngpu > 1:
            gpu_ids = list(range(args.ngpu))
            model = torch.nn.DataParallel(model, device_ids=gpu_ids)
    train(model, args.resume)
Esempio n. 6
0
                .format(i + 1, num_images, detect_time, nms_time))
            _t['im_detect'].clear()
            _t['misc'].clear()

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)


if __name__ == '__main__':
    # Build the detector at the requested input size and load the trained
    # checkpoint, stripping any DataParallel 'module.' key prefix first.
    img_dim = 512 if args.size == '512' else 300
    num_classes = 81 if args.dataset == 'COCO' else 21
    net = build_net('test', img_dim, num_classes)    # initialize detector
    state_dict = torch.load(args.trained_model)

    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        name = key[7:] if key.startswith('module.') else key
        new_state_dict[name] = value
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')
Esempio n. 7
0
                .format(i + 1, num_images, detect_time, nms_time))
            _t['im_detect'].clear()
            _t['misc'].clear()

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)


if __name__ == '__main__':
    # Build the detector at the requested input size and load the trained
    # checkpoint, stripping any DataParallel 'module.' key prefix first.
    img_dim = 512 if args.size == '512' else 320
    num_classes = 81 if args.dataset == 'COCO' else 21
    net = build_net('test', img_dim, num_classes, C_agnostic)    # initialize detector
    state_dict = torch.load(args.trained_model)

    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        name = key[7:] if key.startswith('module.') else key
        new_state_dict[name] = value
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')
Esempio n. 8
0
else:
    print('Unkown version!')
# cfg = VOC_300
cfg = COCO_512

# Anchor boxes are fixed for a given config; compute them once, no grad.
priorbox = PriorBox(cfg)
with torch.no_grad():
    priors = priorbox.forward()
    if args.cuda:
        priors = priors.cuda()
# numclass = 21
numclass = 15
start_load = time.time()
img = cv2.imread(args.img)
# [w, h, w, h] — presumably used to map normalized box coordinates back to
# pixels; verify against the detection code that consumes `scale`.
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
net = build_net('test', args.size, numclass)  # initialize detector

# NOTE(review): looks like mean subtraction + HWC->CHW transpose — confirm
# against the BaseTransform implementation.
transform = BaseTransform(net.size, (123, 117, 104), (2, 0, 1))
with torch.no_grad():
    x = transform(img).unsqueeze(0)
    if args.cuda:
        x = x.cuda()
        scale = scale.cuda()
state_dict = torch.load(args.trained_model, map_location='cpu')
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
    head = k[:7]
    if head == 'module.':
        name = k[7:]  # remove `module.`
Esempio n. 9
0
    def Train(self,
              epochs=200,
              log_iters=True,
              output_weights_dir="weights",
              saved_epoch_interval=10):
        """Train the RFB detector on the configured COCO-format dataset.

        Args:
            epochs: total number of training epochs (stored as ``max_epoch``).
            log_iters: iteration-logging flag, stored into params.
            output_weights_dir: directory where ``.pth`` snapshots are saved.
            saved_epoch_interval: print a progress line every N iterations.

        Side effects:
            Writes a rolling intermediate checkpoint at each epoch boundary
            and a final checkpoint at the end; deletes any existing
            ``coco_cache`` directory before loading the dataset.
        """
        self.system_dict["params"]["max_epoch"] = epochs
        self.system_dict["params"]["log_iters"] = log_iters
        self.system_dict["params"]["save_folder"] = output_weights_dir

        if not os.path.exists(self.system_dict["params"]["save_folder"]):
            os.mkdir(self.system_dict["params"]["save_folder"])

        # Pick the anchor/config preset for the requested input size.
        if (self.system_dict["params"]["size"] == 300):
            cfg = COCO_300
        else:
            cfg = COCO_512

        # Import the network builder matching the chosen backbone version.
        if self.system_dict["params"]["version"] == 'RFB_vgg':
            from models.RFB_Net_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_E_vgg':
            from models.RFB_Net_E_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_mobile':
            from models.RFB_Net_mobile import build_net
            cfg = COCO_mobile_300
        else:
            print('Unkown version!')

        img_dim = (300, 512)[self.system_dict["params"]["size"] == 512]
        # Per-channel means differ between the VGG and MobileNet backbones.
        rgb_means = ((104, 117, 123), (
            103.94, 116.78,
            123.68))[self.system_dict["params"]["version"] == 'RFB_mobile']
        p = (0.6, 0.2)[self.system_dict["params"]["version"] == 'RFB_mobile']

        # Derive the class count from the dataset's classes.txt; presumably
        # the +1 accounts for the background class — confirm against the
        # dataset/loss code.
        f = open(
            self.system_dict["dataset"]["train"]["root_dir"] + "/" +
            self.system_dict["dataset"]["train"]["coco_dir"] +
            "/annotations/classes.txt", 'r')
        lines = f.readlines()
        if (lines[-1] == ""):
            num_classes = len(lines) - 1
        else:
            num_classes = len(lines) + 1

        batch_size = self.system_dict["params"]["batch_size"]
        weight_decay = self.system_dict["params"]["weight_decay"]
        gamma = self.system_dict["params"]["gamma"]
        momentum = self.system_dict["params"]["momentum"]

        self.system_dict["local"]["net"] = build_net('train', img_dim,
                                                     num_classes)

        # NOTE(review): '== None' works but 'is None' is the idiomatic test.
        if self.system_dict["params"]["resume_net"] == None:
            base_weights = torch.load(self.system_dict["params"]["basenet"])
            print('Loading base network...')
            self.system_dict["local"]["net"].base.load_state_dict(base_weights)

            def xavier(param):
                # NOTE(review): init.xavier_uniform is deprecated in favour
                # of init.xavier_uniform_ in current PyTorch.
                init.xavier_uniform(param)

            def weights_init(m):
                # Kaiming-init conv weights; BN weights -> 1; biases -> 0.
                for key in m.state_dict():
                    if key.split('.')[-1] == 'weight':
                        if 'conv' in key:
                            init.kaiming_normal_(m.state_dict()[key],
                                                 mode='fan_out')
                        if 'bn' in key:
                            m.state_dict()[key][...] = 1
                    elif key.split('.')[-1] == 'bias':
                        m.state_dict()[key][...] = 0

            print('Initializing weights...')
            # initialize newly added layers' weights with kaiming_normal method
            self.system_dict["local"]["net"].extras.apply(weights_init)
            self.system_dict["local"]["net"].loc.apply(weights_init)
            self.system_dict["local"]["net"].conf.apply(weights_init)
            self.system_dict["local"]["net"].Norm.apply(weights_init)
            if self.system_dict["params"]["version"] == 'RFB_E_vgg':
                self.system_dict["local"]["net"].reduce.apply(weights_init)
                self.system_dict["local"]["net"].up_reduce.apply(weights_init)

        else:
            # load resume network
            print('Loading resume network...')
            state_dict = torch.load(self.system_dict["params"]["resume_net"])
            # create new OrderedDict that does not contain `module.`
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                head = k[:7]
                if head == 'module.':
                    name = k[7:]  # remove `module.`
                else:
                    name = k
                new_state_dict[name] = v
            self.system_dict["local"]["net"].load_state_dict(new_state_dict)

        if self.system_dict["params"]["ngpu"] > 1:
            self.system_dict["local"]["net"] = torch.nn.DataParallel(
                self.system_dict["local"]["net"],
                device_ids=list(range(self.system_dict["params"]["ngpu"])))

        if self.system_dict["params"]["cuda"]:
            self.system_dict["local"]["net"].cuda()
            cudnn.benchmark = True

        optimizer = optim.SGD(
            self.system_dict["local"]["net"].parameters(),
            lr=self.system_dict["params"]["lr"],
            momentum=self.system_dict["params"]["momentum"],
            weight_decay=self.system_dict["params"]["weight_decay"])
        #optimizer = optim.RMSprop(self.system_dict["local"]["net"].parameters(), lr=self.system_dict["params"]["lr"], alpha = 0.9, eps=1e-08,
        #                      momentum=self.system_dict["params"]["momentum"], weight_decay=self.system_dict["params"]["weight_decay"])

        criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                 False)
        # Anchor (prior) boxes are constant for a config; compute them once.
        priorbox = PriorBox(cfg)
        with torch.no_grad():
            priors = priorbox.forward()
            if self.system_dict["params"]["cuda"]:
                priors = priors.cuda()

        self.system_dict["local"]["net"].train()
        # loss counters
        loc_loss = 0  # epoch
        conf_loss = 0
        epoch = 0 + self.system_dict["params"]["resume_epoch"]
        print('Loading Dataset...')

        # Drop any stale COCO annotation cache from a previous run.
        # NOTE(review): os.system("rm -r ...") is Unix-only.
        if (os.path.isdir("coco_cache")):
            os.system("rm -r coco_cache")

        dataset = COCODetection(
            self.system_dict["dataset"]["train"]["root_dir"],
            self.system_dict["dataset"]["train"]["coco_dir"],
            self.system_dict["dataset"]["train"]["set_dir"],
            preproc(img_dim, rgb_means, p))

        epoch_size = len(dataset) // self.system_dict["params"]["batch_size"]
        max_iter = self.system_dict["params"]["max_epoch"] * epoch_size

        # Learning-rate decay milestones, expressed in iterations.
        stepvalues = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
        print('Training', self.system_dict["params"]["version"], 'on',
              dataset.name)
        step_index = 0

        if self.system_dict["params"]["resume_epoch"] > 0:
            start_iter = self.system_dict["params"]["resume_epoch"] * epoch_size
        else:
            start_iter = 0

        lr = self.system_dict["params"]["lr"]

        for iteration in range(start_iter, max_iter):
            if iteration % epoch_size == 0:
                # create batch iterator (re-shuffled at each epoch boundary)
                batch_iterator = iter(
                    data.DataLoader(
                        dataset,
                        batch_size,
                        shuffle=True,
                        num_workers=self.system_dict["params"]["num_workers"],
                        collate_fn=detection_collate))
                loc_loss = 0
                conf_loss = 0

                # Overwrite a single rolling 'intermediate' checkpoint.
                torch.save(
                    self.system_dict["local"]["net"].state_dict(),
                    self.system_dict["params"]["save_folder"] + "/" +
                    self.system_dict["params"]["version"] + '_' +
                    self.system_dict["params"]["dataset"] + '_epoches_' +
                    'intermediate' + '.pth')
                epoch += 1

            load_t0 = time.time()
            if iteration in stepvalues:
                step_index += 1
            lr = self.adjust_learning_rate(optimizer,
                                           self.system_dict["params"]["gamma"],
                                           epoch, step_index, iteration,
                                           epoch_size)

            # load train data
            images, targets = next(batch_iterator)

            #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))

            if self.system_dict["params"]["cuda"]:
                images = Variable(images.cuda())
                targets = [Variable(anno.cuda()) for anno in targets]
            else:
                images = Variable(images)
                targets = [Variable(anno) for anno in targets]
            # forward
            t0 = time.time()
            out = self.system_dict["local"]["net"](images)
            # backprop
            optimizer.zero_grad()
            loss_l, loss_c = criterion(out, priors, targets)
            loss = loss_l + loss_c
            loss.backward()
            optimizer.step()
            t1 = time.time()
            loc_loss += loss_l.item()
            conf_loss += loss_c.item()
            load_t1 = time.time()
            if iteration % saved_epoch_interval == 0:
                print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                      repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                      '|| Current iter ' + repr(iteration) + '|| Total iter ' +
                      repr(max_iter) + ' || L: %.4f C: %.4f||' %
                      (loss_l.item(), loss_c.item()) +
                      'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                      'LR: %.8f' % (lr))

        torch.save(
            self.system_dict["local"]["net"].state_dict(),
            self.system_dict["params"]["save_folder"] + "/" + 'Final_' +
            self.system_dict["params"]["version"] + '_' +
            self.system_dict["params"]["dataset"] + '.pth')
Esempio n. 10
0
def train():
    """Top-level training loop for the RFB detector.

    Reads all configuration from the module-level ``args``, builds the
    network, dataset, loss and optimizer, runs SGD for ``args.max_epoch``
    epochs, and periodically checkpoints weights into ``args.save_folder``.
    """
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    # Dataset splits and size-dependent anchor config.
    if args.dataset == 'VOC':
        train_sets = [('2007', 'trainval'), ('2012', 'trainval')]
        cfg = (VOC_300, VOC_512)[args.size == '512']
    else:
        train_sets = [('2014', 'train'), ('2014', 'valminusminival')]
        cfg = (COCO_300, COCO_512)[args.size == '512']

    # Import the builder for the selected architecture variant.
    if args.version == 'RFB_vgg':
        from models.RFB_Net_vgg import build_net
    elif args.version == 'RFB_E_vgg':
        from models.RFB_Net_E_vgg import build_net
    elif args.version == 'RFB_d2':
        from models.RFB_Net_vgg_d2 import build_net
    elif args.version == 'RFB_d3':
        from models.RFB_Net_vgg_d3 import build_net
    elif args.version == 'RFB_d4':
        from models.RFB_Net_vgg_d4 import build_net
    elif args.version == 'RFB_d4_fpn':
        from models.RFB_Net_vgg_d4_fpn import build_net
    elif args.version == 'RFB_mobile':
        from models.RFB_Net_mobile import build_net
        cfg = COCO_mobile_300
    else:
        print('Unkown version!')
    logging.info('build model version: {}'.format(args.version))

    img_dim = (300, 512)[args.size == '512']
    # Per-channel means differ between the VGG and MobileNet backbones.
    rgb_means = ((104, 117, 123), (103.94, 116.78,
                                   123.68))[args.version == 'RFB_mobile']
    p = (0.6, 0.2)[args.version == 'RFB_mobile']
    # 738:6 classes ; 2392:7 ; 8718:6
    num_classes = (21, 81)[args.dataset == 'COCO']
    logging.info('dataset number of classes: {}'.format(num_classes))
    batch_size = args.batch_size
    weight_decay = 0.0005
    gamma = 0.1
    momentum = 0.9

    net = build_net('train', img_dim, num_classes)
    # print(net)
    # NOTE(review): 'is None' is the idiomatic comparison here.
    if args.resume_net == None:
        base_weights = torch.load(args.basenet)
        from collections import OrderedDict
        print('Loading base network...')
        net.base.load_state_dict(base_weights)

        def xavier(param):
            # NOTE(review): init.xavier_uniform is deprecated; prefer
            # init.xavier_uniform_.
            init.xavier_uniform(param)

        def weights_init(m):
            # Kaiming-init conv weights; BN weights -> 1; biases -> 0.
            for key in m.state_dict():
                if key.split('.')[-1] == 'weight':
                    if 'conv' in key:
                        init.kaiming_normal_(m.state_dict()[key],
                                             mode='fan_out')
                    if 'bn' in key:
                        m.state_dict()[key][...] = 1
                elif key.split('.')[-1] == 'bias':
                    m.state_dict()[key][...] = 0

        print('Initializing weights...')
        # initialize newly added layers' weights with kaiming_normal method
        net.extras.apply(weights_init)
        net.loc.apply(weights_init)
        net.conf.apply(weights_init)
        net.Norm.apply(weights_init)
        if args.version == 'RFB_E_vgg':
            net.reduce.apply(weights_init)
            net.up_reduce.apply(weights_init)
    else:
        # load resume network
        print('Loading resume network...')
        state_dict = torch.load(args.resume_net)
        # create new OrderedDict that does not contain `module.`
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)

    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.cuda:
        net.cuda()
        cudnn.benchmark = True

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
    #                      momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)
    # Anchor (prior) boxes are constant for a config; compute them once.
    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward()
        if args.cuda:
            priors = priors.cuda()

    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch

    logging.info('Loading Dataset: {}'.format(args.dataset))
    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets,
                               preproc(img_dim, rgb_means, p),
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets,
                                preproc(img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # Learning-rate decay milestones (in iterations), per dataset.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', dataset.name)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    lr = args.lr
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator (re-shuffled at each epoch boundary)
            batch_iterator = iter(
                data.DataLoader(dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=args.num_workers,
                                collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            # Checkpoint every 10 epochs (every 5 past epoch 200).
            # NOTE(review): this concatenation assumes args.save_folder ends
            # with a path separator; the final save below uses os.path.join.
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0
                                                   and epoch > 200):
                torch.save(
                    net.state_dict(), args.save_folder + args.version + '_' +
                    args.dataset + '_epoches_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '|| Totel iter ' + repr(iteration) +
                  ' || L: %.4f C: %.4f||' % (loss_l.item(), loss_c.item()) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                  'LR: %.8f' % (lr))
    torch.save(
        net.state_dict(),
        os.path.join(args.save_folder,
                     'Final_' + args.version + '_' + args.dataset + '.pth'))
Esempio n. 11
0
            save_dir = save_dir + '_divided_mode'
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    else:
        raise AssertionError('ERROR::TYPE IS NOT CORRECT')

    # Setting network
    print('Network setting...')
    img_dim = (300, 512)[args.size == '512']
    rgb_means = ((103.94, 116.78, 123.68),
                 (104, 117, 123))[args.version == 'RFB_vgg'
                                  or args.version == 'RFB_E_vgg']
    p = (0.2, 0.6)[args.version == 'RFB_vgg' or args.version == 'RFB_E_vgg']

    print('Loading pretrained model')
    net = build_net('test', 300, num_classes)  # initialize detector
    state_dict = torch.load(args.trained_model)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v

    try:
        net.load_state_dict(new_state_dict)
    except RuntimeError:
        print('ERROR::The version and weight file is not correct')
Esempio n. 12
0
            _t['im_detect'].clear()
            _t['misc'].clear()

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)


if __name__ == '__main__':
    # load net
    img_dim = 512 if args.size == '512' else 300
    num_classes = 81 if args.dataset == 'COCO' else 21
    net = build_net('test', img_dim, num_classes)
    state_dict = torch.load(args.trained_model)
    # Strip the 'module.' prefix that DataParallel checkpoints carry.
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        name = key[7:] if key.startswith('module.') else key
        new_state_dict[name] = value
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')
Esempio n. 13
0
    def Setup(self):
        '''
        User function: set up the detector for inference.

        Builds the configured RFB network, computes the prior (anchor)
        boxes, loads the trained checkpoint (stripping any DataParallel
        'module.' key prefix), moves the net to CPU/GPU and creates the
        Detect post-processor.

        Args:
            None

        Returns:
            None
        '''
        # Anchor configuration depends on the input resolution.
        if (self.system_dict["params"]["size"] == 300):
            self.system_dict["local"]["cfg"] = COCO_300
        else:
            self.system_dict["local"]["cfg"] = COCO_512

        # Import the builder for the selected architecture variant.
        if self.system_dict["params"]["version"] == 'RFB_vgg':
            from models.RFB_Net_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_E_vgg':
            from models.RFB_Net_E_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_mobile':
            from models.RFB_Net_mobile import build_net
            self.system_dict["local"]["cfg"] = COCO_mobile_300
        else:
            print('Unkown version!')

        # Priors are constant for a given config; build them without grad.
        self.system_dict["local"]["priorbox"] = PriorBox(
            self.system_dict["local"]["cfg"])
        with torch.no_grad():
            self.system_dict["local"]["priors"] = self.system_dict["local"][
                "priorbox"].forward()
            if self.system_dict["params"]["cuda"]:
                self.system_dict["local"]["priors"] = self.system_dict[
                    "local"]["priors"].cuda()

        img_dim = (300, 512)[self.system_dict["params"]["size"] == 512]
        num_classes = len(self.system_dict["local"]["classes"])
        self.system_dict["local"]["net"] = build_net('test', img_dim,
                                                     num_classes)
        state_dict = torch.load(self.system_dict["params"]["trained_model"])

        # Strip the 'module.' prefix that DataParallel checkpoints carry.
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        self.system_dict["local"]["net"].load_state_dict(new_state_dict)
        self.system_dict["local"]["net"].eval()
        print('Finished loading model!')

        if self.system_dict["params"]["cuda"]:
            self.system_dict["local"]["net"] = self.system_dict["local"][
                "net"].cuda()
            cudnn.benchmark = True
        else:
            self.system_dict["local"]["net"] = self.system_dict["local"][
                "net"].cpu()

        # Post-processor: (num_classes, background label 0, anchor config).
        self.system_dict["local"]["detector"] = Detect(
            num_classes, 0, self.system_dict["local"]["cfg"])
Esempio n. 14
0
    rgb_means = (103.94, 116.78, 123.68)
# -------------------------------------------------------------------------------------------------------------------- #

# -------------------------------------------------------------------------------------------------------------------- #
# Model Initialize
# Hyper-parameters resolved from the parsed command-line arguments.
p = 0.2 if args.version == 'RFB_mobile' else 0.6
num_classes = 81 if args.dataset == 'COCO' else 21
batch_size = args.batch_size
weight_decay = 0.0005
gamma = 0.1
momentum = 0.9
# Optional visdom-based visualization.
if args.visdom:
    import visdom
    viz = visdom.Visdom()

net = build_net(img_dim, num_classes)
print(net)
# -------------------------------------------------------------------------------------------------------------------- #

# -------------------------------------------------------------------------------------------------------------------- #
# Load weight
if not args.resume_net:
    base_weights = torch.load(args.basenet)
    print('Loading base network...')
    net.vgg.load_state_dict(base_weights) # base --> vgg

    def xavier(param):
        """Fill ``param`` in-place with Xavier-uniform values."""
        # init.xavier_uniform is deprecated in modern PyTorch; the
        # trailing-underscore variant is the supported in-place API.
        init.xavier_uniform_(param)

    def weights_init(m):
        for key in m.state_dict():
Esempio n. 15
0
        chkpt = torch.load(ops.landmarks_model,
                           map_location=lambda storage, loc: storage)
        landmarks_model.load_state_dict(chkpt)
        landmarks_model.eval()  # 设置为前向推断模式
        print('load landmarks model : {}'.format(ops.landmarks_model))
        print(
            '\n/******************* landmarks model acc  ******************/')
        acc_landmarks_model(ops, landmarks_model)
    landmarks_model = landmarks_model.to(device)

    #--------------------------------------------------------------------------- 构建人脸检测模型
    cfg = VOC_300
    rgb_means = VOC_300['rgb_means']
    num_classes = ops.num_classes
    use_cuda = torch.cuda.is_available()
    detect_model = build_net('test', ops.img_dim,
                             ops.num_classes)  # initialize detector

    #---------------------------------------------
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # chkpt = torch.load(ops.detect_model, map_location=device)
    chkpt = torch.load(ops.detect_model,
                       map_location=lambda storage, loc: storage)
    detect_model.load_state_dict(chkpt)
    detect_model.eval()  # 设置为前向推断模式

    acc_model(ops, detect_model)
    detect_model = detect_model.to(device)

    detector = Detect(ops.num_classes, 0, cfg)  #  num_classes, bkg_label, cfg
    priorbox = PriorBox(cfg, debug_=False)
    with torch.no_grad():
Esempio n. 16
0
    from models.RFB_Net_mobile import build_net
    cfg = COCO_mobile_300
else:
    print('Unkown version!')

# Input resolution: 512 only when explicitly requested, 300 otherwise.
img_dim = 512 if args.size == '512' else 300
# BGR channel means for input normalisation and the dropout/keep probability
# both depend on the backbone variant, so select them together.
if args.version == 'RFB_mobile':
    rgb_means = (103.94, 116.78, 123.68)
    p = 0.2
else:
    rgb_means = (104, 117, 123)
    p = 0.6
# 21 classes for VOC (20 + background), 81 for COCO (80 + background).
num_classes = 81 if args.dataset == 'COCO' else 21
batch_size = args.batch_size
weight_decay = 0.0005
gamma = 0.1
momentum = 0.9

net = build_net('train', img_dim, num_classes)
print(net)
if args.resume_net == None:
    base_weights = torch.load(args.basenet)
    print('Loading base network...')
    net.base.load_state_dict(base_weights)

    def xavier(param):
        """Fill *param* in-place with Xavier/Glorot-uniform values."""
        # `init.xavier_uniform` is a deprecated alias that emits a warning;
        # the trailing-underscore in-place variant is the supported API and
        # behaves identically.
        init.xavier_uniform_(param)

    def weights_init(m):
        for key in m.state_dict():
            if key.split('.')[-1] == 'weight':
                if 'conv' in key:
                    init.kaiming_normal_(m.state_dict()[key], mode='fan_out')
                if 'bn' in key:
Esempio n. 17
0
            _t['im_detect'].clear()
            _t['misc'].clear()

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)


if __name__ == '__main__':
    # Build the detector in inference mode at the configured resolution.
    img_dim = 512 if args.size == '512' else 300
    num_classes = 81 if args.dataset == 'COCO' else 21
    net = build_net('test', img_dim, num_classes, rate=args.rate)

    # Checkpoints saved from nn.DataParallel prefix every key with
    # 'module.'; strip it so the weights load into a bare model.
    checkpoint = torch.load(args.trained_model)

    from collections import OrderedDict
    stripped = OrderedDict(
        (key[7:] if key[:7] == 'module.' else key, value)
        for key, value in checkpoint.items())
    net.load_state_dict(stripped)
    net.eval()
    print('Finished loading model!')
Esempio n. 18
0
            # keep = nms(c_bboxes,c_scores)

            keep = nms(c_dets, 0.2, force_cpu=args.cpu)
            c_dets = c_dets[keep, :]
            all_boxes[j] = c_dets

        nms_time = _t['misc'].toc()
        total_time = detect_time + nms_time

        #print('total time: ', total_time)
        return all_boxes, total_time


if __name__ == '__main__':
    # Instantiate the detector in inference mode and load trained weights.
    net = build_net('test', img_dim, num_classes)  # initialize detector
    checkpoint = torch.load(args.trained_model)

    # Weights saved from nn.DataParallel carry a 'module.' prefix on every
    # key; drop it so they match a bare model's state dict.
    from collections import OrderedDict
    cleaned = OrderedDict()
    for key, value in checkpoint.items():
        cleaned[key[7:] if key[:7] == 'module.' else key] = value
    net.load_state_dict(cleaned)
    net.eval()
    print('Finished loading model!')
Esempio n. 19
0
    print('Evaluating detections')
    print(all_boxes[1])
    testset.evaluate_detections(all_boxes, save_folder)


if __name__ == '__main__':
    # load net
    img_dim = 512 if args.size == '512' else 300
    # Class count includes the background class
    # (VOC: 20+1, d2City: 12+1, COCO: 80+1).
    if args.dataset == 'VOC':
        num_classes = 21
    elif args.dataset == 'd2City':
        num_classes = 13
    elif args.dataset == 'COCO':  # BUG FIX: was `args.data`; every other
        num_classes = 81          # branch reads `args.dataset`
    else:
        # Previously an unknown dataset fell through and left `num_classes`
        # undefined, crashing later with a NameError; fail loudly instead.
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    net = build_net('test', img_dim, num_classes,
                    rate=args.rate)  # initialize detector
    state_dict = torch.load(args.trained_model)
    # Strip the 'module.' prefix that nn.DataParallel adds to every key so
    # the checkpoint loads into a single-GPU model.
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k[:7] == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')