コード例 #1
0
def detect_parking_spaces(dir,
                          threshold=0.2,
                          save=False,
                          show=False,
                          cam=-1,
                          gpu=False,
                          config='training/m2det/configs/m2det512_vgg.py',
                          weights='training/m2det/weights/m2det512_vgg.pth'):
    """Detect parking spaces with M2Det over ``<dir>/images`` or a camera feed.

    Args:
        dir: dataset root (name shadows the builtin but is kept for API
            compatibility). Images are read from ``<dir>/images``; annotated
            frames go to ``<dir>/detection_images`` and detected locations are
            pickled to ``<dir>/labels/split.txt``.
        threshold: score threshold handed to ``draw_detection``.
        save: when True, write each annotated frame to disk.
        show: when True, display each annotated frame with OpenCV.
        cam: OpenCV camera index; a negative value means "use the image files".
        gpu: run inference on CUDA when True.
        config: M2Det config file path.
        weights: M2Det checkpoint file path.
    """
    print('Detect Parking Spaces Program')  # BUG FIX: typo "Programe"
    cfg = Config.fromfile(config)
    anchor_config = anchors(cfg)

    priorbox = PriorBox(anchor_config)
    net = build_net('test',
                    size=cfg.model.input_size,
                    config=cfg.model.m2det_config)
    init_net(net, cfg, weights)
    net.eval()
    if not gpu:
        cfg.test_cfg.cuda = False

    with torch.no_grad():
        priors = priorbox.forward()
        if cfg.test_cfg.cuda:
            net = net.cuda()
            priors = priors.cuda()
            cudnn.benchmark = True
        else:
            net = net.cpu()
            # Hoisted here: the original called ``priors.cpu()`` on every
            # frame inside the loop.
            priors = priors.cpu()
    print_info('===> Finished constructing and loading model')

    _preprocess = BaseTransform(cfg.model.input_size, cfg.model.rgb_means,
                                (2, 0, 1))
    detector = Detect(cfg.model.m2det_config.num_classes, cfg.loss.bkg_label,
                      anchor_config)

    # One distinct color per class for drawing.
    base = int(np.ceil(pow(cfg.model.m2det_config.num_classes, 1. / 3)))
    colors = [
        _to_color(x, base) for x in range(cfg.model.m2det_config.num_classes)
    ]
    # Class names are the last CSV column of each line; the original leaked
    # this file handle.
    with open('training/m2det/data/coco_labels.txt', 'r') as label_file:
        cats = [line.strip().split(',')[-1] for line in label_file]
    labels = tuple(['__background__'] + cats)

    im_path = dir + '/images'
    if cam >= 0:
        capture = cv2.VideoCapture(cam)
    im_fnames = sorted(fname for fname in os.listdir(im_path)
                       if os.path.splitext(fname)[-1] == '.jpg')
    im_iter = iter(os.path.join(im_path, fname) for fname in im_fnames)

    save_dir = dir + '/detection_images'
    os.makedirs(save_dir, exist_ok=True)
    locs_list = {}
    frame_idx = 0
    while True:
        if cam < 0:
            try:
                fname = next(im_iter)
            except StopIteration:
                break
            image = cv2.imread(fname, cv2.IMREAD_COLOR)
        else:
            ret, image = capture.read()
            if not ret:
                cv2.destroyAllWindows()
                capture.release()
                break
            # BUG FIX: ``fname`` was never assigned in camera mode, so the
            # ``locs_list[fname]`` line below raised NameError; synthesize a
            # stable per-frame key instead.
            fname = 'camera_frame_%06d.jpg' % frame_idx
        frame_idx += 1

        loop_start = time.time()
        w, h = image.shape[1], image.shape[0]
        img = _preprocess(image).unsqueeze(0)
        if cfg.test_cfg.cuda:
            img = img.cuda()
        scale = torch.Tensor([w, h, w, h])
        out = net(img)

        boxes, scores = detector.forward(out, priors)
        boxes = (boxes[0] * scale).cpu().numpy()
        scores = scores[0].cpu().numpy()
        allboxes = []

        # Per-class score filtering + NMS (class 0 is background).
        for j in range(1, cfg.model.m2det_config.num_classes):
            inds = np.where(scores[:, j] > cfg.test_cfg.score_threshold)[0]
            if len(inds) == 0:
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack(
                (c_bboxes, c_scores[:, np.newaxis])).astype(np.float32,
                                                            copy=False)
            keep = nms(c_dets, cfg.test_cfg.iou,
                       force_cpu=cfg.test_cfg.soft_nms)
            keep = keep[:cfg.test_cfg.keep_per_class]
            c_dets = c_dets[keep, :]
            allboxes.extend([det.tolist() + [j] for det in c_dets])

        loop_time = time.time() - loop_start
        # BUG FIX: when nothing was detected ``np.array([])`` is 1-D and the
        # column slices below raised IndexError; use an empty (0, 6) array.
        allboxes = np.array(allboxes) if allboxes else np.zeros((0, 6))
        boxes = allboxes[:, :4]
        scores = allboxes[:, 4]
        cls_inds = allboxes[:, 5]
        fps = 1.0 / float(loop_time) if cam >= 0 else -1
        im2show, loc = draw_detection(image,
                                      boxes,
                                      scores,
                                      cls_inds,
                                      fps,
                                      threshold,
                                      colors=colors,
                                      labels=labels)
        locs_list[fname] = loc

        # Shrink very tall frames so they fit on screen.
        if im2show.shape[0] > 1100:
            im2show = cv2.resize(im2show, (int(
                1000. * float(im2show.shape[1]) / im2show.shape[0]), 1000))
        if show:
            cv2.imshow('test', im2show)
            if cam < 0:
                cv2.waitKey(1000)
            elif cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                capture.release()
                break
        if save:
            name = fname.split('.')[0].split('/')[-1]
            cv2.imwrite(f"{save_dir}/{name}.jpg", im2show)

    # BUG FIX: create the labels directory (the original assumed it existed)
    # and close the pickle file via ``with`` instead of a manual close.
    labels_dir = dir + '/labels'
    os.makedirs(labels_dir, exist_ok=True)
    with open(labels_dir + '/split.txt', 'wb') as f:
        pickle.dump(locs_list, f)
コード例 #2
0
ファイル: train.py プロジェクト: abeja-inc/Platform_handson
def handler(context):
    """ABEJA Platform training handler: train an SSD detector.

    Loads the 'trainval' and 'test' dataset aliases from the platform API,
    builds the SSD network with precomputed priors, runs the SGD training
    loop for ``max_iter`` iterations (evaluating every 10000), and saves the
    final weights to ``ABEJA_TRAINING_RESULT_DIR``.
    """
    aliases = context.datasets

    # Pull both splits through the platform API and materialize them.
    train_records = list(load_dataset_from_api(aliases['trainval']))
    test_records = list(load_dataset_from_api(aliases['test']))

    train_dataset = DetectionDatasetFromAPI(
        train_records, transform=SSDAugmentation(min_dim, MEANS))
    test_dataset = DetectionDatasetFromAPI(
        test_records, transform=SSDAugmentation(min_dim, MEANS))

    # Anchor boxes are fixed, so build them once without gradients.
    priorbox = PriorBox(min_dim, PARAMS)
    with torch.no_grad():
        priors = priorbox.forward().to(device)

    ssd_net = build_ssd('train', priors, min_dim, num_classes).to(device)

    # Fetch and load the pretrained VGG16 backbone.
    url = 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth'
    weight_file = os.path.join(ABEJA_TRAINING_RESULT_DIR,
                               'vgg16_reducedfc.pth')
    download(url, weight_file)
    print('Loading base network...')
    ssd_net.vgg.load_state_dict(torch.load(weight_file))

    optimizer = optim.SGD(ssd_net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=5e-4)
    criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False,
                             PARAMS['variance'], device)

    # Counts how many LR decay steps have been applied.
    step_index = 0

    loader_kwargs = dict(num_workers=0,
                         collate_fn=tools.detection_collate,
                         pin_memory=True)
    trainloader = data.DataLoader(train_dataset, batch_size,
                                  shuffle=True, **loader_kwargs)
    testloader = data.DataLoader(test_dataset, batch_size,
                                 shuffle=False, **loader_kwargs)

    # Iterate the train loader repeatedly until max_iter is reached.
    iteration = 1
    while iteration <= max_iter:
        ssd_net.train()
        for images, targets in trainloader:
            if iteration > max_iter:
                break

            if iteration in lr_steps:
                step_index += 1
                adjust_learning_rate(optimizer, 0.1, step_index)

            images = images.to(device)
            targets = [annotation.to(device) for annotation in targets]

            predictions = ssd_net(images)

            # Standard backprop step on the combined loc + conf loss.
            optimizer.zero_grad()
            loss_l, loss_c = criterion(predictions, targets)
            loss = loss_l + loss_c
            loss.backward()
            optimizer.step()

            if iteration % 100 == 0:
                print('[Train] iter {}, loss: {:.4f}'.format(
                    iteration, loss.item()))
                statistics(iteration, loss.item(), None, None, None)
                writer.add_scalar('main/loss', loss.item(), iteration)
                writer.add_scalar('main/loc_loss', loss_l.item(), iteration)
                writer.add_scalar('main/conf_loss', loss_c.item(), iteration)

            if iteration % 10000 == 0:
                # Periodic evaluation; restore train mode afterwards.
                eval(testloader, ssd_net, criterion, iteration)
                ssd_net.train()

            iteration += 1
    torch.save(ssd_net.state_dict(),
               os.path.join(ABEJA_TRAINING_RESULT_DIR, 'model.pth'))
コード例 #3
0
else:
    # Fall back to the COCO config; pick the 512 variant when requested.
    cfg = (COCO_300, COCO_512)[args.size == '512']

# Import the network builder that matches the requested architecture.
if args.version == 'RFB_vgg':
    from models.RFB_Net_vgg import build_net
elif args.version == 'RFB_E_vgg':
    from models.RFB_Net_E_vgg import build_net
elif args.version == 'RFB_mobile':
    from models.RFB_Net_mobile import build_net
    # The mobile backbone only supports its own 300-input config.
    cfg = COCO_mobile_300
elif args.version == 'SSD_vgg':
    from models.SSD_vgg import build_net
else:
    print('Unkown version!')

# Precompute the anchor (prior) boxes once.
# NOTE(review): ``Variable(..., volatile=True)`` is pre-0.4 PyTorch; on modern
# versions this should be ``with torch.no_grad():`` — confirm target version.
priorbox = PriorBox(cfg)
priors = Variable(priorbox.forward(), volatile=True)


def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline."""
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
コード例 #4
0
ファイル: demo.py プロジェクト: wolfworld6/fssd.pytorch
    plt.show()

if __name__ == "__main__":
    # PERF FIX: build the network and load the checkpoint ONCE — the original
    # rebuilt the net and re-read the .pth file for every image in the folder.
    model = 'fssd_voc_79_74.pth'
    net = build_net(300, 21)
    state_dict = torch.load(model)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k.startswith('module.'):
            # Strip DataParallel's `module.` prefix so keys match the bare net.
            new_state_dict[k[7:]] = v
        else:
            new_state_dict[k] = v
    net.load_state_dict(new_state_dict)
    net.eval()
    net = net.cuda()
    cudnn.benchmark = True
    print("Finished loading model")
    transform = BaseTransform(300, (104, 117, 123), (2, 0, 1))
    detector = Detect(21, 0, VOC_300)
    priorbox = PriorBox(VOC_300)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    # Run detection on every image in the folder with the shared model.
    for img_name in os.listdir('image/'):
        img = cv2.imread("image/" + img_name)
        test_net(net, img, img_name, detector, transform, priors,
                 top_k=200, thresh=0.4)
コード例 #5
0
ファイル: train.py プロジェクト: sudohainguyen/M2Det
# Build the M2Det network in training mode.
net = build_net(
    'train',
    size=cfg.model.input_size,  # Only 320, 512, 704 and 800 are supported
    config=cfg.model.m2det_config)
init_net(net, cfg, args.resume_net
         )  # init the network with pretrained weights or resumed weights

# Wrap for multi-GPU before moving to CUDA; enable the cuDNN autotuner.
if args.ngpu > 1:
    net = torch.nn.DataParallel(net)
if cfg.train_cfg.cuda:
    net.cuda()
    cudnn.benchmark = True

optimizer = set_optimizer(net, cfg)
criterion = set_criterion(cfg)
priorbox = PriorBox(anchors(cfg))

# Anchor boxes are fixed, so generate them once without tracking gradients.
with torch.no_grad():
    priors = priorbox.forward()
    if cfg.train_cfg.cuda:
        priors = priors.cuda()

if __name__ == '__main__':
    net.train()
    epoch = args.resume_epoch
    print_info('===> Loading Dataset...', ['yellow', 'bold'])
    dataset = get_dataloader(cfg, args.dataset, 'train_sets')
    epoch_size = len(dataset) // (cfg.train_cfg.per_batch_size * args.ngpu)
    max_iter = getattr(cfg.train_cfg.step_lr, args.dataset)[-1] * epoch_size
    stepvalues = [
        _ * epoch_size
コード例 #6
0
def train():
    """Drive the training loop for the module-level ``net``.

    Uses the module-level ``args``, ``optimizer``, ``criterion`` and dataset
    roots/configs. Builds the dataset for ``args.dataset``, iterates for
    ``args.max_epoch`` epochs with step LR decay, and checkpoints on a fixed
    schedule (every 10k iterations up to 110k, then every 1k).
    """
    net.train()
    # per-epoch loss accumulators
    loc_loss = 0
    conf_loss = 0
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets,
                               preproc(img_dim, rgb_means, p),
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        print('COCO not supported now!')
        return
    elif args.dataset == 'CUSTOM':
        dataset = CustomDetection(CUSTOMroot, train_sets,
                                  preproc(img_dim, rgb_means, p),
                                  CustomAnnotationTransform())
        dataset_512 = CustomDetection(CUSTOMroot, train_sets,
                                      preproc(512, rgb_means, p),
                                      CustomAnnotationTransform())
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', dataset.name)
    step_index = 0

    start_iter = args.resume_epoch * epoch_size if args.resume_epoch > 0 else 0

    lr = args.lr
    image_size = 0
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # Re-create the batch iterator at the start of every epoch.
            image_size = ('300', '512')[1]  # hard-coded to 512 upstream
            # BUG FIX: the original indexed ``(dataset, dataset_512)[...]``,
            # which eagerly evaluates BOTH operands and raises NameError for
            # VOC (``dataset_512`` only exists on the CUSTOM branch). A lazy
            # conditional keeps the working CUSTOM path identical.
            # NOTE(review): the VOC path still has no 512 dataset — confirm
            # intended behavior with the authors.
            epoch_dataset = dataset_512 if image_size == '512' else dataset
            batch_iterator = iter(
                data.DataLoader(epoch_dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=args.num_workers,
                                collate_fn=detection_collate))
            # Priors must match the input size chosen for this epoch.
            priorbox = PriorBox(
                VOC_512_3 if image_size == '512' else VOC_300_2)
            priors = Variable(priorbox.forward(), volatile=True)
            loc_loss = 0
            conf_loss = 0
            epoch += 1

        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)

        if args.cuda:
            images = Variable(images.cuda())
            targets = [
                Variable(anno.cuda(), volatile=True) for anno in targets
            ]
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]

        # forward / backward
        load_t0 = time.time()
        out = net(images)
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        loc_loss += loss_l.data[0]
        conf_loss += loss_c.data[0]

        if iteration % 100 == 0:
            print('Epoch:' + repr(epoch) + ' || image-size:' +
                  repr(image_size) + ' || epochiter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '|| Total iter ' + repr(iteration) +  # BUG FIX: "Totel"
                  ' || L: %.4f C: %.4f||' % (loss_l.data[0], loss_c.data[0]) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                  'LR: %.8f' % (lr))
        # BUG FIX: the original had two save branches with byte-identical
        # bodies; merged into one condition.
        if ((iteration <= 110000 and
             (iteration == 0 or iteration % 10000 == 0))
                or (iteration > 110000 and iteration % 1000 == 0)):
            print('Saving state, iter:', iteration)
            torch.save(net.state_dict(),
                       'weights/ssd300_2_VOC_' + repr(iteration) + '.pth')

    torch.save(
        net.state_dict(), args.save_folder + 'Final_' + args.version + '_' +
        args.dataset + '.pth')
コード例 #7
0
ファイル: test_image.py プロジェクト: xychen9459/TDRN
def main():
    """Run single-image detection over an image list, saving/visualizing results.

    Relies on module-level configuration defined elsewhere in the file:
    ``deform``, ``multihead``, ``bn``, ``refine``, ``ssd_dim``,
    ``num_classes``, ``trained_model``, ``device``, ``top_k``,
    ``confidence_threshold``, ``nms_threshold``, ``cfg``, ``img_set``,
    ``img_root``, ``dataset``, ``backbone``, ``save_dir``, ``display``,
    ``results_file``, ``det_list``, ``labelmap``, ``class_name``.
    """
    mean = (104, 117, 123)
    print('loading model!')
    # Select the network variant: deformable dual-refinement vs plain RefineDet.
    if deform:
        from model.dualrefinedet_vggbn import build_net
        net = build_net('test',
                        size=ssd_dim,
                        num_classes=num_classes,
                        c7_channel=1024,
                        def_groups=deform,
                        multihead=multihead,
                        bn=bn)
    else:
        from model.refinedet_vgg import build_net
        net = build_net('test',
                        size=ssd_dim,
                        num_classes=num_classes,
                        use_refine=refine,
                        c7_channel=1024,
                        bn=bn)
    net.load_state_dict(torch.load(trained_model))
    net.eval()
    print('Finished loading model!', trained_model)
    net = net.to(device)
    detector = Detect(num_classes, 0, top_k, confidence_threshold,
                      nms_threshold)
    # Anchor boxes are fixed; compute them once without gradient tracking.
    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward().to(device)
    # Each line of img_set names one image: COCO/VOC lists carry only the
    # name; other sets carry "name id" pairs.
    for i, line in enumerate(open(img_set, 'r')):
        # if i==10:
        #     break
        if 'COCO' in dataset:
            image_name = line[:-1]
            image_id = int(image_name.split('_')[-1])
        elif 'VOC' in dataset:
            image_name = line[:-1]
            image_id = -1
        else:
            image_name, image_id = line.split(' ')
            image_id = image_id[:-1]
        print(i, image_name, image_id)
        image_path = os.path.join(img_root, image_name + '.jpg')
        image = cv2.imread(image_path, 1)
        h, w, _ = image.shape
        # 640x480 copy used only for on-screen preview drawing.
        image_draw = cv2.resize(image.copy(), (640, 480))
        im_trans = base_transform(image, ssd_dim, mean)
        ######################## Detection ########################
        with torch.no_grad():
            # HWC numpy image -> NCHW tensor on the target device.
            x = torch.from_numpy(im_trans).unsqueeze(0).permute(0, 3, 1,
                                                                2).to(device)
            if 'RefineDet' in backbone and refine:
                arm_loc, _, loc, conf = net(x)
            else:
                loc, conf = net(x)
                arm_loc = None
            detections = detector.forward(loc,
                                          conf,
                                          priors,
                                          arm_loc_data=arm_loc)
        ############################################################
        out = list()
        # detections indexed as [batch, class, rank, score+box]; column 0 is
        # the confidence score. Class 0 (background) is skipped.
        for j in range(1, detections.size(1)):
            dets = detections[0, j, :]
            if dets.sum() == 0:
                continue
            # Keep only rows with a positive score.
            mask = dets[:, 0].gt(0.).expand(dets.size(-1), dets.size(0)).t()
            dets = torch.masked_select(dets, mask).view(-1, dets.size(-1))
            boxes = dets[:, 1:-1] if dets.size(-1) == 6 else dets[:, 1:]
            # Scale normalized box coordinates back to the original image.
            boxes[:, 0] *= w
            boxes[:, 2] *= w
            boxes[:, 1] *= h
            boxes[:, 3] *= h
            scores = dets[:, 0].cpu().numpy()
            boxes_np = boxes.cpu().numpy()

            for b, s in zip(boxes_np, scores):
                if save_dir:
                    out.append(
                        [int(b[0]),
                         int(b[1]),
                         int(b[2]),
                         int(b[3]), j - 1, s])
                    if 'COCO' in dataset:
                        # COCO JSON format: bbox is [x, y, width, height].
                        det_list.append({
                            'image_id':
                            image_id,
                            'category_id':
                            labelmap[j],
                            'bbox': [
                                float('{:.1f}'.format(b[0])),
                                float('{:.1f}'.format(b[1])),
                                float('{:.1f}'.format(b[2] - b[0] + 1)),
                                float('{:.1f}'.format(b[3] - b[1] + 1))
                            ],
                            'score':
                            float('{:.2f}'.format(s))
                        })
                    else:
                        # Plain-text record: "<id> <class> <score> x1 y1 x2 y2".
                        results_file.write(
                            str(image_id) + ' ' + str(j) + ' ' + str(s) + ' ' +
                            str(np.around(b[0], 2)) + ' ' +
                            str(np.around(b[1], 2)) + ' ' +
                            str(np.around(b[2], 2)) + ' ' +
                            str(np.around(b[3], 2)) + '\n')
                if display:
                    # Draw on the preview copy; coordinates rescaled to 640x480.
                    cv2.rectangle(image_draw,
                                  (int(b[0] / w * 640), int(b[1] / h * 480)),
                                  (int(b[2] / w * 640), int(b[3] / h * 480)),
                                  (0, 255, 0),
                                  thickness=1)

                    cls = class_name[j] if 'COCO' in dataset else str(
                        labelmap[j - 1])
                    put_str = cls + ':' + str(np.around(s, decimals=2))
                    cv2.putText(
                        image_draw,
                        put_str,
                        (int(b[0] / w * 640), int(b[1] / h * 480) - 10),
                        cv2.FONT_HERSHEY_DUPLEX,
                        0.5,
                        color=(0, 255, 0),
                        thickness=1)
        if display:
            cv2.imshow('frame', image_draw)
            ch = cv2.waitKey(0)
            # 115 == ord('s'): pressing 's' saves this frame's results.
            if ch == 115:
                if save_dir:
                    print('save: ', line)
                    torch.save(
                        out, os.path.join(save_dir, '%s.pkl' % str(line[:-1])))
                    cv2.imwrite(
                        os.path.join(save_dir, '%s.jpg' % str(line[:-1])),
                        image)
                    cv2.imwrite(
                        os.path.join(save_dir, '%s_box.jpg' % str(line[:-1])),
                        image_draw)

    cv2.destroyAllWindows()
    if save_dir:
        if dataset == 'COCO':
            json.dump(det_list, results_file)
        results_file.close()
コード例 #8
0
    optimizer = optim.SGD(model_.parameters(),
                          lr=init_lr,
                          momentum=momentum,
                          weight_decay=weight_decay)
    # model_ = model_.to(device)
    model_.cuda()
    model_.train()
    use_cuda = torch.cuda.is_available()
    model_.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0

    criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)

    priorbox = PriorBox(VOC_300, debug_=False)

    with torch.no_grad():
        priors = priorbox.forward()
        if use_cuda:
            priors = priors.cuda()
            print('priors.size(0) : ', priors.size(0))
    #------------------------------------------------

    epochs = 1000

    # (104,117,123)
    best_loss = np.inf
    loss_mean = np.inf
    loss_idx = 0.
コード例 #9
0
    def setup(self):
        """Patch the stock Pelee_VOC config with user parameters and build
        the dataset, network, optimizer, criterion and prior boxes into
        ``self.system_dict["local"]``.

        The config is patched textually in two passes: first the parameters
        known up front, then the LR schedule and class count that depend on
        the loaded dataset.
        """
        # Read the template config shipped with the library (closed via
        # ``with``; the original leaked the handle on exceptions).
        with open(
                "Monk_Object_Detection/15_pytorch_peleenet/lib/configs/Pelee_VOC.py"
        ) as f:
            lines = f.read()

        lines = lines.replace(
            "save_epochs=10",
            "save_epochs=" + str(self.system_dict["params"]["num_epochs"]))
        lines = lines.replace(
            "print_epochs=10",
            "print_epochs=" + str(self.system_dict["params"]["num_epochs"]))
        lines = lines.replace(
            "weights_save='weights/'", "weights_save='" +
            self.system_dict["params"]["model_output_dir"] + "/'")
        if (self.system_dict["params"]["use_gpu"]):
            # Deliberate no-op: the template already enables CUDA.
            lines = lines.replace("cuda=True", "cuda=True")
        else:
            lines = lines.replace("cuda=True", "cuda=False")
        lines = lines.replace(
            "per_batch_size=64",
            "per_batch_size=" + str(self.system_dict["params"]["batch_size"]))
        lines = lines.replace(
            "num_workers=8",
            "num_workers=" + str(self.system_dict["params"]["num_workers"]))

        with open("config.py", 'w') as f:
            f.write(lines)

        self.system_dict["local"]["cfg"] = Config.fromfile("config.py")

        print_info('===> Loading Dataset...', ['yellow', 'bold'])
        self.system_dict["local"]["dataset"] = get_dataloader(
            self.system_dict["local"]["cfg"],
            train_img_dir=self.system_dict["params"]["train_img_dir"],
            train_anno_dir=self.system_dict["params"]["train_anno_dir"],
            class_file=self.system_dict["params"]["class_file"])
        print_info('===> Done...', ['yellow', 'bold'])

        print_info('===> Setting up epoch details...', ['yellow', 'bold'])
        self.system_dict["local"]["epoch_size"] = len(
            self.system_dict["local"]["dataset"]) // (
                self.system_dict["local"]["cfg"].train_cfg.per_batch_size *
                self.system_dict["params"]["ngpu"])

        self.system_dict["local"]["max_iter"] = self.system_dict["local"][
            "epoch_size"] * self.system_dict["params"]["num_epochs"]

        # Decay the learning rate at 1/3 and 2/3 of training.
        self.system_dict["local"]["stepvalues"] = [
            self.system_dict["local"]["max_iter"] // 3,
            2 * self.system_dict["local"]["max_iter"] // 3
        ]

        # Second pass: patch values that depend on the loaded dataset.
        with open("config.py") as f:
            lines = f.read()

        lines = lines.replace(
            "step_lr=[80000, 100000, 120000,160000]",
            "step_lr=" + str(self.system_dict["local"]["stepvalues"]))
        lines = lines.replace(
            "num_classes=21", "num_classes=" +
            str(len(self.system_dict["local"]["dataset"].class_to_ind)))
        lines = lines.replace("lr=5e-3",
                              "lr=" + str(self.system_dict["params"]["lr"]))
        lines = lines.replace(
            "gamma=0.1", "gamma=" + str(self.system_dict["params"]["gamma"]))
        lines = lines.replace(
            "momentum=0.9",
            "momentum=" + str(self.system_dict["params"]["momentum"]))
        lines = lines.replace(
            "weight_decay=0.0005",
            "weight_decay=" + str(self.system_dict["params"]["weight_decay"]))

        with open("config_final.py", 'w') as f:
            f.write(lines)
        print_info('===> Done...', ['yellow', 'bold'])

        self.system_dict["local"]["cfg"] = Config.fromfile("config_final.py")

        self.system_dict["local"]["net"] = build_net(
            'train', self.system_dict["local"]["cfg"].model.input_size,
            self.system_dict["local"]["cfg"].model)

        if (self.system_dict["params"]["resume_net"]):
            init_net(self.system_dict["local"]["net"],
                     self.system_dict["local"]["cfg"],
                     self.system_dict["params"]
                     ["resume_net"])  # init the network with pretrained
        if self.system_dict["params"]["ngpu"] > 1:
            self.system_dict["local"]["net"] = torch.nn.DataParallel(
                self.system_dict["local"]["net"])
        if self.system_dict["local"]["cfg"].train_cfg.cuda:
            self.system_dict["local"]["net"].cuda()
            # BUG FIX: was ``cudnn.benckmark = True`` (typo) — that only
            # created a dead attribute and never enabled the cuDNN autotuner.
            cudnn.benchmark = True

        self.system_dict["local"]["optimizer"] = set_optimizer(
            self.system_dict["local"]["net"], self.system_dict["local"]["cfg"])
        self.system_dict["local"]["criterion"] = set_criterion(
            self.system_dict["local"]["cfg"])
        self.system_dict["local"]["priorbox"] = PriorBox(
            anchors(self.system_dict["local"]["cfg"].model))

        with torch.no_grad():
            self.system_dict["local"]["priors"] = self.system_dict["local"][
                "priorbox"].forward()
            if self.system_dict["local"]["cfg"].train_cfg.cuda:
                self.system_dict["local"]["priors"] = self.system_dict[
                    "local"]["priors"].cuda()
コード例 #10
0
def get_prior():
    """Build the anchor (prior) boxes for the configured input size."""
    if args.size == '512':
        chosen_cfg = VOC_512
    else:
        chosen_cfg = VOC_300
    box_generator = PriorBox(chosen_cfg)
    return Variable(box_generator.forward(), volatile=True)
コード例 #11
0
def train():
    """Train RefineDet: build the two-branch (ARM/ODM) network, set up SGD
    and the two RefineMultiBoxLoss criteria, then run the iteration loop
    with optional per-epoch evaluation, visdom plotting and periodic
    checkpointing.

    Relies on module-level globals: args, cfg, dataset, val_dataset and
    helpers (weights_init, adjust_learning_rate, test_net, ...).
    """
    # network set-up
    ssd_net = build_refine('train',
                           cfg['min_dim'],
                           cfg['num_classes'],
                           use_refine=True,
                           use_tcb=True)
    net = ssd_net

    if args.cuda:
        net = torch.nn.DataParallel(
            ssd_net)  # state_dict will have .module. prefix
        cudnn.benchmark = True

    if args.resume:
        print('Resuming training, loading {}...'.format(args.resume))
        ssd_net.load_weights(args.resume)
    else:
        print('Using preloaded base network...')  # Preloaded.
        print('Initializing other weights...')
        # initialize newly added layers' weights with xavier method
        ssd_net.extras.apply(weights_init)
        ssd_net.trans_layers.apply(weights_init)
        ssd_net.latent_layrs.apply(weights_init)
        ssd_net.up_layers.apply(weights_init)
        ssd_net.arm_loc.apply(weights_init)
        ssd_net.arm_conf.apply(weights_init)
        ssd_net.odm_loc.apply(weights_init)
        ssd_net.odm_conf.apply(weights_init)

    if args.cuda:
        net = net.cuda()

    # optimizer and loss set-up
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    arm_criterion = RefineMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5, False, 0,
                                       args.cuda)
    odm_criterion = RefineMultiBoxLoss(
        cfg['num_classes'], 0.5, True, 0, True, 3, 0.5, False, 0.01,
        args.cuda)  # 0.01 -> 0.99 negative confidence threshold

    # different from normal ssd, where the PriorBox is stored inside SSD object
    # FIX: Variable(..., volatile=True) was removed in PyTorch 0.4; build the
    # (constant) priors under torch.no_grad() instead.
    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward()
    # detector used in test_net for testing
    detector = RefineDetect(cfg['num_classes'], 0, cfg, object_score=0.01)

    net.train()
    # loss counters
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    print('Loading the dataset...')

    epoch_size = len(dataset) // args.batch_size
    print('Training refineDet on:', dataset.name)
    print('Using the specified args:')
    print(args)

    if args.visdom:
        import visdom
        viz = visdom.Visdom()
        # initialize visdom loss plot
        vis_title = 'SSD.PyTorch on ' + dataset.name
        vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
        iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)
        epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)

    # adjust learning rate based on epoch
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    step_index = 0

    # training data loader
    data_loader = data.DataLoader(dataset,
                                  args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    batch_iterator = iter(data_loader)
    mean_odm_loss_c = 0
    mean_odm_loss_l = 0
    mean_arm_loss_c = 0
    mean_arm_loss_l = 0
    for iteration in range(args.start_iter,
                           cfg['max_epoch'] * epoch_size + 10):
        try:
            images, targets = next(batch_iterator)
        except StopIteration:
            # the exhausted iterator cannot be re-used; make a fresh one
            batch_iterator = iter(data_loader)
            images, targets = next(batch_iterator)

        if args.visdom and iteration != 0 and (iteration % epoch_size == 0):
            # update visdom loss plot
            update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,
                            'append', epoch_size)
            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0

        if iteration != 0 and (iteration % epoch_size == 0):
            # per-epoch evaluation on the validation set when requested
            if args.evaluate:
                net.eval()
                APs, mAP = test_net(args.eval_folder,
                                    net,
                                    detector,
                                    priors,
                                    args.cuda,
                                    val_dataset,
                                    BaseTransform(net.module.size,
                                                  cfg['testset_mean']),
                                    args.max_per_image,
                                    thresh=args.confidence_threshold
                                    )  # 320 originally for cfg['min_dim']
                net.train()
            epoch += 1

        # update learning rate
        if iteration in stepvalues:
            step_index = stepvalues.index(iteration) + 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # FIX: dropped volatile=True (removed in PyTorch 0.4); targets from
        # the DataLoader do not require gradients anyway.
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(ann.cuda()) for ann in targets]
        else:
            images = Variable(images)
            targets = [Variable(ann) for ann in targets]
        # forward
        t0 = time.time()
        out = net(images)
        arm_loc, arm_conf, odm_loc, odm_conf = out
        # backprop
        optimizer.zero_grad()
        # arm branch loss
        arm_loss_l, arm_loss_c = arm_criterion((arm_loc, arm_conf), priors,
                                               targets)
        # odm branch loss (conditioned on the arm outputs)
        odm_loss_l, odm_loss_c = odm_criterion(
            (odm_loc, odm_conf), priors, targets, (arm_loc, arm_conf), False)

        # FIX: loss.data[0] raises IndexError on 0-dim tensors in
        # PyTorch >= 0.4; .item() is the supported scalar accessor.
        mean_arm_loss_c += arm_loss_c.item()
        mean_arm_loss_l += arm_loss_l.item()
        mean_odm_loss_c += odm_loss_c.item()
        mean_odm_loss_l += odm_loss_l.item()

        loss = arm_loss_l + arm_loss_c + odm_loss_l + odm_loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()

        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '|| Total iter ' + repr(iteration) +
                  ' || AL: %.4f AC: %.4f OL: %.4f OC: %.4f||' %
                  (mean_arm_loss_l / 10, mean_arm_loss_c / 10,
                   mean_odm_loss_l / 10, mean_odm_loss_c / 10) +
                  'Timer: %.4f sec. ||' % (t1 - t0) + 'Loss: %.4f ||' %
                  (loss.item()) + 'LR: %.8f' % (lr))

            mean_odm_loss_c = 0
            mean_odm_loss_l = 0
            mean_arm_loss_c = 0
            mean_arm_loss_l = 0

        if iteration != 0 and iteration % 5000 == 0:
            print('Saving state, iter:', iteration)
            torch.save(ssd_net.state_dict(),
                       'weights/ssd300_refineDet_' + repr(iteration) + '.pth')

    torch.save(ssd_net.state_dict(),
               args.save_folder + '' + args.dataset + '.pth')
コード例 #12
0
from layers.functions import Detect, PriorBox
from data.config import VOC_320
import torch

# Post-processing hyper-parameters for the detector.
top_k = 200
confidence_threshold = 0.5
nms_threshold = 0.45

priorbox = PriorBox(VOC_320)
# 21 classes = 20 VOC classes + background (label 0).
detector = Detect(21, 0, top_k, confidence_threshold, nms_threshold)
with torch.no_grad():
    priors = priorbox.forward()
    # Fake network outputs just to exercise the detector.
    loc = torch.randn(1, 6375, 4)
    # NOTE(review): conf has no batch dimension while loc does -- confirm
    # this Detect implementation expects (num_priors, num_classes) here.
    conf = torch.randn(6375, 21)

# FIX: the original also allocated an `arm_loc` tensor that was never used
# (arm_loc_data=None was passed); the dead allocation has been removed.
out = detector.forward(loc, conf, priors, arm_loc_data=None)
コード例 #13
0
ファイル: train.py プロジェクト: sclzsx/Improved_SSD
def train(args):
    """Train the SSD variant selected by ``args.version`` with the loss
    selected by ``args.loss`` on VOC-style data.

    Checkpoints are written every 5 epochs and loc/conf losses plus the
    learning rate are logged to TensorBoard every 10 iterations.

    Relies on module-level globals: num_classes, DATASET, VOCroot,
    rgb_means, p, momentum, weight_decay, gamma and the usual helpers.
    """
    cfg = (VOC_300, VOC_512)[args.size == '512']
    if args.version == 'SSD_VGG_Mobile_Little':
        from models.SSD_VGG_Mobile_Little import build_net
        cfg = VEHICLE_240
    elif args.version == 'SSD_VGG_Optim_FPN_RFB':
        from models.SSD_VGG_Optim_FPN_RFB import build_net
    elif args.version == 'SSD_ResNet_FPN':
        from models.SSD_ResNet_FPN import build_net
    elif args.version == 'SSD_HRNet':
        from models.SSD_HRNet import build_net
    elif args.version == 'EfficientDet':
        from models.EfficientDet import build_net
    elif args.version == 'SSD_DetNet':
        from models.SSD_DetNet import build_net
        cfg = DetNet_300
    elif args.version == 'SSD_M2Det':
        from models.SSD_M2Det import build_net
        cfg = M2Det_320
    elif args.version == 'SSD_Pelee':
        from models.SSD_Pelee import build_net
    else:
        args.version = 'SSD_VGG_RFB'
        from models.SSD_VGG_RFB import build_net

    if args.loss == "OHEM":
        criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                 False)
    elif args.loss == "GIOU":
        criterion = GIOUMultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                     False)
    elif args.loss == "DIOU":
        criterion = GIOUMultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                     False, loss_name='Diou')
    elif args.loss == "CIOU":
        criterion = GIOUMultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                     False, loss_name='Ciou')
    elif args.loss == "FocalLoss":
        criterion = FocalLossMultiBoxLoss(num_classes, 0.5, True, 0, True, 3,
                                          0.5, False, args.anchor)
    else:
        # FIX: the original silently left `criterion` undefined for an
        # unknown loss name and crashed later with NameError; fail fast.
        raise ValueError('Unsupported loss type: {}'.format(args.loss))

    if 'withneg' in DATASET:
        train_sets = [
            (DATASET.replace('_withneg', ''), 'trainval_withneg'),
        ]
    else:
        train_sets = [
            (DATASET.replace('_withneg', ''), 'trainval'),
        ]

    if args.resume_epoch == 0:
        args.save_folder = os.path.join(
            args.save_folder, DATASET, args.version, args.loss + '_' +
            args.anchor + '_' + args.fpn_type + '_bz' + str(args.bz))
        if not os.path.exists(args.save_folder):
            os.makedirs(args.save_folder)
    else:
        args.save_folder = Path(args.resume_net).parent

    # some build_net variants accept an fpn_type argument, others do not
    try:
        net = build_net('train', cfg['min_dim'], num_classes, args.fpn_type)
    except TypeError:
        # FIX: narrowed from a bare except -- only a signature mismatch
        # should silently trigger the 3-argument fallback.
        net = build_net('train', cfg['min_dim'], num_classes)

    print(args.save_folder)
    try:
        flops, params = get_model_complexity_info(
            net, (cfg['min_dim'], cfg['min_dim']), print_per_layer_stat=False)
        print('FLOPs:', flops, 'Params:', params)
    except Exception:
        # complexity info is purely informational; never abort training
        pass

    init_net(net, args.resume_net
             )  # init the network with pretrained weights or resumed weights

    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
    if args.cuda:
        net.cuda()
        cudnn.benchmark = True

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=momentum,
                          weight_decay=weight_decay)

    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward()
        if args.cuda:
            priors = priors.cuda()

    dataset = VOCDetection(VOCroot, train_sets,
                           preproc(cfg['min_dim'], rgb_means, p),
                           AnnotationTransform())
    len_dataset = len(dataset)
    epoch_size = len_dataset // args.bz
    max_iter = args.max_epoch * epoch_size
    print(train_sets, 'len_dataset:', len_dataset, 'max_iter:', max_iter)

    stepvalues = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    step_index = 0
    start_iter = args.resume_epoch * epoch_size if args.resume_epoch > 0 else 0
    # recover the LR-schedule position when resuming
    if stepvalues[0] < start_iter < stepvalues[1]:
        step_index = 1
    elif stepvalues[1] < start_iter < stepvalues[2]:
        step_index = 2
    elif start_iter > stepvalues[2]:
        step_index = 3

    net.train()
    writer = SummaryWriter(args.save_folder)
    loc_loss = 0
    conf_loss = 0
    epoch = 0 + args.resume_epoch

    # FIX: build the DataLoader once and seed the iterator before the loop;
    # the original only created `batch_iterator` when
    # iteration % epoch_size == 0, so resuming mid-epoch hit a NameError.
    data_loader = data.DataLoader(dataset,
                                  args.bz,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  collate_fn=detection_collate,
                                  pin_memory=True)
    batch_iterator = iter(data_loader)

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # shuffle=True reshuffles each time a new iterator is created
            batch_iterator = iter(data_loader)
            loc_loss = 0
            conf_loss = 0
            # FIX: the original condition duplicated `epoch % 5 == 0` in
            # both clauses; the `epoch > 200` branch was redundant.
            if epoch % 5 == 0 and epoch > 0:
                torch.save(net.state_dict(),
                           os.path.join(args.save_folder,
                                        str(epoch) + '.pth'))
            epoch += 1

        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(args.lr, optimizer, gamma, epoch, step_index,
                                  iteration, epoch_size)

        images, targets = next(batch_iterator)

        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]

        out = net(images)

        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()

        loc_loss += loss_l.item()
        conf_loss += loss_c.item()

        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + '||EpochIter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '||Totel iter ' + repr(iteration) + '||L: %.4f C: %.4f' %
                  (loss_l.item(), loss_c.item()) + '||LR: %.8f' % (lr))
            writer.add_scalar('Train/total_loss',
                              (loss_l.item() + loss_c.item()), iteration)
            writer.add_scalar('Train/loc_loss', loss_l.item(), iteration)
            writer.add_scalar('Train/conf_loss', loss_c.item(), iteration)
            writer.add_scalar('Train/lr', lr, iteration)

    torch.save(net.state_dict(),
               os.path.join(args.save_folder,
                            str(args.max_epoch) + '.pth'))
コード例 #14
0
ファイル: evaluate.py プロジェクト: happyjin/AMTNet
def main():
    """Evaluate saved AMTNet checkpoints on the test split.

    For every frame gap in args.eval_gaps and every checkpoint iteration in
    args.eval_iters: load the weights, run test_net and append per-class
    APs plus the mAP to a per-run log file.
    """
    means = (104, 117, 123)  # only support voc now
    args.save_root += args.dataset + '/'
    args.data_root += args.dataset + '/'
    for eval_gap in [int(g) for g in args.eval_gaps.split(',')]:
        args.eval_gap = eval_gap

        args.print_step = 10
        args.fusion_type = args.fusion_type.upper()
        args.fusion = args.fusion_type in ['SUM', 'CAT', 'MEAN']
        ## Experiment name, used for the save directory and the visdom ENV
        if not args.fusion:
            args.exp_name = 'AMTNet-{}-s{:d}-{}-sl{:02d}sg{:02d}-bs{:02d}-lr{:05d}'.format(
                args.dataset, args.train_split, args.input_type_base.upper(),
                args.seq_len, args.seq_gap, args.batch_size,
                int(args.lr * 100000))
        else:
            args.exp_name = 'AMTNet-{}-s{:d}-{}-{}-{}-sl{:02d}sg{:02d}-bs{:02d}-lr{:05d}'.format(
                args.dataset, args.train_split, args.fusion_type,
                args.input_type_base, args.input_type_extra, args.seq_len,
                args.seq_gap, args.batch_size, int(args.lr * 100000))
        print(args.exp_name, ' eg::=> ', eval_gap)

        args.cfg = v2
        args.num_classes = len(CLASSES[args.dataset]) + 1  # 7 +1 background

        # Get prior or anchor boxes
        with torch.no_grad():
            priorbox = PriorBox(v2, args.seq_len)
            priors = priorbox.forward()
            priors = priors.cuda()
            num_feat_multiplier = {'CAT': 2, 'SUM': 1, 'MEAN': 1, 'NONE': 1}
            # fusion type can be one of the above keys
            args.fmd = [512, 1024, 512, 256, 256, 256]
            args.kd = 3
            args.fusion_num_muliplier = num_feat_multiplier[args.fusion_type]

            # Load dataset
            dataset = ActionDetection(args, 'test', BaseTransform(args.ssd_dim, means), NormliseBoxes(), full_test=False)

            ## DEFINE THE NETWORK
            net = AMTNet(args)
            if args.ngpu > 1:
                print('\nLets do dataparallel\n\n')
                net = torch.nn.DataParallel(net)

            for iteration in [int(it) for it in args.eval_iters.split(',')]:
                fname = args.save_root + 'cache/' + args.exp_name + "/testing-{:d}-eg{:d}.log".format(iteration, eval_gap)
                # FIX: open the (line-buffered) log via a context manager so
                # the handle is closed even if evaluation raises -- the
                # original leaked it on any error.
                with open(fname, "w", 1) as log_file:
                    log_file.write(args.exp_name + '\n')
                    print(fname)
                    trained_model_path = args.save_root + 'cache/' + args.exp_name + '/AMTNet_' + repr(iteration) + '.pth'
                    log_file.write(trained_model_path + '\n')

                    net.load_state_dict(torch.load(trained_model_path))
                    print('Finished loading model %d !' % iteration)
                    net.eval()
                    net = net.cuda()

                    # evaluation
                    torch.cuda.synchronize()
                    tt0 = time.perf_counter()
                    log_file.write('Testing net \n')

                    mAP, ap_all, ap_strs = test_net(net, priors, args, dataset, iteration)
                    for ap_str in ap_strs:
                        print(ap_str)
                        log_file.write(ap_str + '\n')
                    ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
                    print(ptr_str)
                    log_file.write(ptr_str)
                    torch.cuda.synchronize()
                    print('Complete set time {:0.2f}'.format(time.perf_counter() - tt0))
コード例 #15
0
    def __init__(self):
        """Build the full train/eval harness from the global cfg: data
        loaders, model, priors, detector, optimizer, LR scheduler, loss
        criterion and the TensorBoard writer.
        """
        self.cfg = cfg

        # Load data
        print('===> Loading data')
        self.train_loader = load_data(
            cfg.dataset, 'train') if 'train' in cfg.phase else None
        self.eval_loader = load_data(cfg.dataset,
                                     'eval') if 'eval' in cfg.phase else None
        self.test_loader = load_data(cfg.dataset,
                                     'test') if 'test' in cfg.phase else None

        # Build model
        print('===> Building model')
        self.base_trans = BaseTransform(cfg.image_size[0],
                                        cfg.network.rgb_means,
                                        cfg.network.rgb_std, (2, 0, 1))
        self.priors = PriorBox(cfg.anchor)
        # NOTE(review): eval() on a config-supplied model name -- make sure
        # cfg.model can never carry untrusted input.
        self.model = eval(cfg.model + '.build_net')(cfg.image_size[0],
                                                    cfg.dataset.num_classes)
        with torch.no_grad():
            self.priors = self.priors.forward()
        self.detector = Detect2(cfg.post_process)
        # Utilize GPUs for computation
        self.use_gpu = torch.cuda.is_available()
        if cfg.train.train_scope == '':
            trainable_param = self.model.parameters()
        else:
            trainable_param = self.trainable_param(cfg.train.train_scope)
        self.output_dir = os.path.join(cfg.output_dir, cfg.name, cfg.date)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.log_dir = os.path.join(self.output_dir, 'logs')
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        self.checkpoint = cfg.train.checkpoint

        previous = self.find_previous()
        # NOTE(review): this override disables checkpoint resuming entirely;
        # it looks like a leftover debugging switch -- confirm before removal.
        previous = False
        if previous:
            self.start_epoch = previous[0][-1]
            self.resume_checkpoint(previous[1][-1])
        else:
            self.start_epoch = self.initialize()
        if self.use_gpu:
            print('Utilize GPUs for computation')
            print('Number of GPU available', torch.cuda.device_count())
            self.model.cuda()
            # FIX: Tensor.cuda() is NOT in-place (unlike Module.cuda());
            # the original `self.priors.cuda()` discarded its result and
            # left the priors on the CPU.
            self.priors = self.priors.cuda()
            cudnn.benchmark = True
            if cfg.ngpu > 1:
                self.model = torch.nn.DataParallel(self.model,
                                                   device_ids=list(
                                                       range(cfg.ngpu)))

        # print trainable scope
        print('Trainable scope: {}'.format(cfg.train.train_scope))
        self.optimizer = self.configure_optimizer(trainable_param,
                                                  cfg.train.optimizer)
        self.exp_lr_scheduler = self.configure_lr_scheduler(
            self.optimizer, cfg.train.lr_scheduler)
        self.max_epochs = cfg.train.lr_scheduler.max_epochs
        # metric
        if cfg.network.multi_box_loss_type == 'origin':
            self.criterion = MultiBoxLoss2(cfg.matcher, self.priors,
                                           self.use_gpu)
        else:
            # FIX: the message read cfg.multi_box_loss_type, which does not
            # exist at that level and raised AttributeError instead of
            # printing the intended error.
            print('ERROR: ' + cfg.network.multi_box_loss_type +
                  ' is not supported')
            sys.exit()
        # Set the logger
        self.writer = SummaryWriter(log_dir=self.log_dir)
        self.checkpoint_prefix = cfg.name + '_' + cfg.dataset.dataset
コード例 #16
0
    def Setup(self):
        '''
        User function: Setup all parameters

        Builds the RFB network for the configured version/size, loads the
        trained weights (stripping any DataParallel "module." prefix),
        computes the prior boxes and creates the detector.

        Args:
            None

        Returns:
            None

        Raises:
            ValueError: if params["version"] is not a known RFB variant.
        '''
        if (self.system_dict["params"]["size"] == 300):
            self.system_dict["local"]["cfg"] = COCO_300
        else:
            self.system_dict["local"]["cfg"] = COCO_512

        if self.system_dict["params"]["version"] == 'RFB_vgg':
            from models.RFB_Net_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_E_vgg':
            from models.RFB_Net_E_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_mobile':
            from models.RFB_Net_mobile import build_net
            self.system_dict["local"]["cfg"] = COCO_mobile_300
        else:
            # FIX: the original only printed 'Unkown version!' (typo) and
            # then crashed below with NameError on build_net; fail fast
            # with a clear message instead.
            raise ValueError('Unknown version: {}'.format(
                self.system_dict["params"]["version"]))

        self.system_dict["local"]["priorbox"] = PriorBox(
            self.system_dict["local"]["cfg"])
        with torch.no_grad():
            self.system_dict["local"]["priors"] = self.system_dict["local"][
                "priorbox"].forward()
            if self.system_dict["params"]["cuda"]:
                self.system_dict["local"]["priors"] = self.system_dict[
                    "local"]["priors"].cuda()

        img_dim = (300, 512)[self.system_dict["params"]["size"] == 512]
        num_classes = len(self.system_dict["local"]["classes"])
        self.system_dict["local"]["net"] = build_net('test', img_dim,
                                                     num_classes)
        state_dict = torch.load(self.system_dict["params"]["trained_model"])

        # strip the 'module.' prefix that DataParallel checkpoints carry
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            if k.startswith('module.'):
                name = k[7:]  # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        self.system_dict["local"]["net"].load_state_dict(new_state_dict)
        self.system_dict["local"]["net"].eval()
        print('Finished loading model!')

        if self.system_dict["params"]["cuda"]:
            self.system_dict["local"]["net"] = self.system_dict["local"][
                "net"].cuda()
            cudnn.benchmark = True
        else:
            self.system_dict["local"]["net"] = self.system_dict["local"][
                "net"].cpu()

        self.system_dict["local"]["detector"] = Detect(
            num_classes, 0, self.system_dict["local"]["cfg"])
コード例 #17
0
def train(cfg):
    """Train an M2Det detector described by *cfg* (a mmcv-style config path).

    Builds the network, optionally restores the checkpoint for
    ``start_epoch``, then runs an SGD training loop with MultiBox loss,
    logging progress and saving snapshots periodically.

    Relies on module-level globals: ``device``, ``checkpoint_path``,
    ``start_epoch`` and the configured ``logging`` setup.

    Args:
        cfg: path to the config file; only input sizes 320, 512, 704 and
            800 are supported by the M2Det builder.
    """
    cfg = Config.fromfile(cfg)
    net = build_net('train',
                    size=cfg.model.input_size,  # Only 320, 512, 704 and 800 are supported
                    config=cfg.model.m2det_config)
    init_net(net, cfg, False)
    net.to(device)
    # Resume from the snapshot matching start_epoch, if one exists.
    if os.path.exists(checkpoint_path.format(start_epoch)):
        checkpoints = torch.load(checkpoint_path.format(start_epoch))
        net.load_state_dict(checkpoints)
        logging.info('checkpoint loaded.')

    optimizer = optim.SGD(net.parameters(),
                          lr=cfg.train_cfg.lr[0],
                          momentum=cfg.optimizer.momentum,
                          weight_decay=cfg.optimizer.weight_decay)
    criterion = MultiBoxLoss(cfg.model.m2det_config.num_classes,
                             overlap_thresh=cfg.loss.overlap_thresh,
                             prior_for_matching=cfg.loss.prior_for_matching,
                             bkg_label=cfg.loss.bkg_label,
                             neg_mining=cfg.loss.neg_mining,
                             neg_pos=cfg.loss.neg_pos,
                             neg_overlap=cfg.loss.neg_overlap,
                             encode_target=cfg.loss.encode_target)
    # Anchor boxes are fixed for the whole run; compute them once.
    priorbox = PriorBox(anchors(cfg))
    with torch.no_grad():
        priors = priorbox.forward().to(device)
    net.train()

    anchor_config = anchors(cfg)
    detector = Detect(cfg.model.m2det_config.num_classes,
                      cfg.loss.bkg_label, anchor_config)
    logging.info('detector initiated.')

    dataset = get_dataloader(cfg, 'Helmet', 'train_sets')
    train_ds = DataLoader(dataset, cfg.train_cfg.per_batch_size,
                          shuffle=True,
                          num_workers=0,
                          collate_fn=detection_collate)
    logging.info('dataset loaded, start to train...')

    # BUGFIX: initialise `epoch` so the final save below cannot raise
    # NameError when start_epoch >= cfg.model.epochs (empty range leaves
    # the loop variable unbound).
    epoch = start_epoch
    for epoch in range(start_epoch, cfg.model.epochs):
        for i, data in enumerate(train_ds):
            try:
                lr = adjust_learning_rate_helmet(optimizer, epoch, cfg)
                images, targets = data
                images = images.to(device)
                targets = [anno.to(device) for anno in targets]
                out = net(images)

                optimizer.zero_grad()
                loss_l, loss_c = criterion(out, priors, targets)
                loss = loss_l + loss_c
                loss.backward()
                optimizer.step()

                if i % 30 == 0:
                    logging.info('Epoch: {}, iter: {}, loc_loss: {}, conf_loss: {}, loss: {}, lr: {}'.format(
                        epoch, i, loss_l.item(), loss_c.item(), loss.item(), lr
                    ))

                if i % 2000 == 0:
                    torch.save(net.state_dict(), checkpoint_path.format(epoch))
                    logging.info('model saved.')
            except KeyboardInterrupt:
                # Save a snapshot before bailing out on Ctrl-C.
                torch.save(net.state_dict(), checkpoint_path.format(epoch))
                logging.info('model saved.')
                exit(0)
    torch.save(net.state_dict(), checkpoint_path.format(epoch))
コード例 #18
0
    def Train(self,
              epochs=200,
              log_iters=True,
              output_weights_dir="weights",
              saved_epoch_interval=10):
        """Train the configured RFB-Net detector on a COCO-format dataset.

        Reads all settings from ``self.system_dict["params"]`` and the
        dataset description from ``self.system_dict["dataset"]``.  Writes an
        intermediate snapshot every epoch and a final snapshot at the end of
        training into *output_weights_dir*.

        Args:
            epochs: maximum number of epochs to train for.
            log_iters: stored in params; whether per-iteration logging is on.
            output_weights_dir: directory receiving the ``.pth`` snapshots.
            saved_epoch_interval: print a progress line every N iterations.
        """
        self.system_dict["params"]["max_epoch"] = epochs
        self.system_dict["params"]["log_iters"] = log_iters
        self.system_dict["params"]["save_folder"] = output_weights_dir

        if not os.path.exists(self.system_dict["params"]["save_folder"]):
            os.mkdir(self.system_dict["params"]["save_folder"])

        # Pick the anchor/config preset from the requested input size;
        # the mobile backbone below overrides it with its own preset.
        if (self.system_dict["params"]["size"] == 300):
            cfg = COCO_300
        else:
            cfg = COCO_512

        if self.system_dict["params"]["version"] == 'RFB_vgg':
            from models.RFB_Net_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_E_vgg':
            from models.RFB_Net_E_vgg import build_net
        elif self.system_dict["params"]["version"] == 'RFB_mobile':
            from models.RFB_Net_mobile import build_net
            cfg = COCO_mobile_300
        else:
            print('Unkown version!')

        img_dim = (300, 512)[self.system_dict["params"]["size"] == 512]
        # Per-channel pixel means and augmentation probability differ for
        # the MobileNet backbone.
        rgb_means = ((104, 117, 123), (
            103.94, 116.78,
            123.68))[self.system_dict["params"]["version"] == 'RFB_mobile']
        p = (0.6, 0.2)[self.system_dict["params"]["version"] == 'RFB_mobile']

        # Number of classes = entries in classes.txt + 1 background class.
        # NOTE(review): this file handle is never closed, and
        # readlines() keeps trailing newlines, so `lines[-1] == ""` looks
        # like it can never be true — confirm the intended handling of a
        # trailing blank line.
        f = open(
            self.system_dict["dataset"]["train"]["root_dir"] + "/" +
            self.system_dict["dataset"]["train"]["coco_dir"] +
            "/annotations/classes.txt", 'r')
        lines = f.readlines()
        if (lines[-1] == ""):
            num_classes = len(lines) - 1
        else:
            num_classes = len(lines) + 1

        batch_size = self.system_dict["params"]["batch_size"]
        weight_decay = self.system_dict["params"]["weight_decay"]
        gamma = self.system_dict["params"]["gamma"]
        momentum = self.system_dict["params"]["momentum"]

        self.system_dict["local"]["net"] = build_net('train', img_dim,
                                                     num_classes)

        if self.system_dict["params"]["resume_net"] == None:
            # Fresh start: load backbone weights, then init the new layers.
            base_weights = torch.load(self.system_dict["params"]["basenet"])
            print('Loading base network...')
            self.system_dict["local"]["net"].base.load_state_dict(base_weights)

            # NOTE(review): `xavier` is defined but never used below.
            def xavier(param):
                init.xavier_uniform(param)

            def weights_init(m):
                # Kaiming-init conv weights; BN weights -> 1, all biases -> 0.
                for key in m.state_dict():
                    if key.split('.')[-1] == 'weight':
                        if 'conv' in key:
                            init.kaiming_normal_(m.state_dict()[key],
                                                 mode='fan_out')
                        if 'bn' in key:
                            m.state_dict()[key][...] = 1
                    elif key.split('.')[-1] == 'bias':
                        m.state_dict()[key][...] = 0

            print('Initializing weights...')
            # initialize newly added layers' weights with kaiming_normal method
            self.system_dict["local"]["net"].extras.apply(weights_init)
            self.system_dict["local"]["net"].loc.apply(weights_init)
            self.system_dict["local"]["net"].conf.apply(weights_init)
            self.system_dict["local"]["net"].Norm.apply(weights_init)
            if self.system_dict["params"]["version"] == 'RFB_E_vgg':
                self.system_dict["local"]["net"].reduce.apply(weights_init)
                self.system_dict["local"]["net"].up_reduce.apply(weights_init)

        else:
            # load resume network
            print('Loading resume network...')
            state_dict = torch.load(self.system_dict["params"]["resume_net"])
            # create new OrderedDict that does not contain `module.`
            # (DataParallel checkpoints prefix every key with 'module.')
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                head = k[:7]
                if head == 'module.':
                    name = k[7:]  # remove `module.`
                else:
                    name = k
                new_state_dict[name] = v
            self.system_dict["local"]["net"].load_state_dict(new_state_dict)

        if self.system_dict["params"]["ngpu"] > 1:
            self.system_dict["local"]["net"] = torch.nn.DataParallel(
                self.system_dict["local"]["net"],
                device_ids=list(range(self.system_dict["params"]["ngpu"])))

        if self.system_dict["params"]["cuda"]:
            self.system_dict["local"]["net"].cuda()
            cudnn.benchmark = True

        optimizer = optim.SGD(
            self.system_dict["local"]["net"].parameters(),
            lr=self.system_dict["params"]["lr"],
            momentum=self.system_dict["params"]["momentum"],
            weight_decay=self.system_dict["params"]["weight_decay"])
        #optimizer = optim.RMSprop(self.system_dict["local"]["net"].parameters(), lr=self.system_dict["params"]["lr"], alpha = 0.9, eps=1e-08,
        #                      momentum=self.system_dict["params"]["momentum"], weight_decay=self.system_dict["params"]["weight_decay"])

        criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                                 False)
        # Anchor boxes are fixed; compute them once up front.
        priorbox = PriorBox(cfg)
        with torch.no_grad():
            priors = priorbox.forward()
            if self.system_dict["params"]["cuda"]:
                priors = priors.cuda()

        self.system_dict["local"]["net"].train()
        # loss counters
        loc_loss = 0  # epoch
        conf_loss = 0
        epoch = 0 + self.system_dict["params"]["resume_epoch"]
        print('Loading Dataset...')

        # Stale annotation caches break COCODetection; clear them first.
        if (os.path.isdir("coco_cache")):
            os.system("rm -r coco_cache")

        dataset = COCODetection(
            self.system_dict["dataset"]["train"]["root_dir"],
            self.system_dict["dataset"]["train"]["coco_dir"],
            self.system_dict["dataset"]["train"]["set_dir"],
            preproc(img_dim, rgb_means, p))

        epoch_size = len(dataset) // self.system_dict["params"]["batch_size"]
        max_iter = self.system_dict["params"]["max_epoch"] * epoch_size

        # LR decay milestones (in iterations): after epochs 90, 120, 140.
        stepvalues = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
        print('Training', self.system_dict["params"]["version"], 'on',
              dataset.name)
        step_index = 0

        if self.system_dict["params"]["resume_epoch"] > 0:
            start_iter = self.system_dict["params"]["resume_epoch"] * epoch_size
        else:
            start_iter = 0

        lr = self.system_dict["params"]["lr"]

        # Iteration-based training loop (epochs are derived from epoch_size).
        for iteration in range(start_iter, max_iter):
            if iteration % epoch_size == 0:
                # create batch iterator
                batch_iterator = iter(
                    data.DataLoader(
                        dataset,
                        batch_size,
                        shuffle=True,
                        num_workers=self.system_dict["params"]["num_workers"],
                        collate_fn=detection_collate))
                loc_loss = 0
                conf_loss = 0

                # Overwrite the same 'intermediate' snapshot every epoch.
                torch.save(
                    self.system_dict["local"]["net"].state_dict(),
                    self.system_dict["params"]["save_folder"] + "/" +
                    self.system_dict["params"]["version"] + '_' +
                    self.system_dict["params"]["dataset"] + '_epoches_' +
                    'intermediate' + '.pth')
                epoch += 1

            load_t0 = time.time()
            if iteration in stepvalues:
                step_index += 1
            lr = self.adjust_learning_rate(optimizer,
                                           self.system_dict["params"]["gamma"],
                                           epoch, step_index, iteration,
                                           epoch_size)

            # load train data
            images, targets = next(batch_iterator)

            #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))

            if self.system_dict["params"]["cuda"]:
                images = Variable(images.cuda())
                targets = [Variable(anno.cuda()) for anno in targets]
            else:
                images = Variable(images)
                targets = [Variable(anno) for anno in targets]
            # forward
            t0 = time.time()
            out = self.system_dict["local"]["net"](images)
            # backprop
            optimizer.zero_grad()
            loss_l, loss_c = criterion(out, priors, targets)
            loss = loss_l + loss_c
            loss.backward()
            optimizer.step()
            t1 = time.time()
            loc_loss += loss_l.item()
            conf_loss += loss_c.item()
            load_t1 = time.time()
            if iteration % saved_epoch_interval == 0:
                print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                      repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                      '|| Current iter ' + repr(iteration) + '|| Total iter ' +
                      repr(max_iter) + ' || L: %.4f C: %.4f||' %
                      (loss_l.item(), loss_c.item()) +
                      'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                      'LR: %.8f' % (lr))

        torch.save(
            self.system_dict["local"]["net"].state_dict(),
            self.system_dict["params"]["save_folder"] + "/" + 'Final_' +
            self.system_dict["params"]["version"] + '_' +
            self.system_dict["params"]["dataset"] + '.pth')
コード例 #19
0
    ['yellow', 'bold'])

logger = set_logger(args.tensorboard)
global cfg
cfg = Config.fromfile(args.config)
net = build_net('train', cfg.model.input_size, cfg.model)
init_net(net, cfg, args.resume_net)  # init the network with pretrained
if args.ngpu > 1:
    net = torch.nn.DataParallel(net)
if cfg.train_cfg.cuda:
    net.cuda()
    cudnn.benckmark = True

optimizer = set_optimizer(net, cfg)
criterion = set_criterion(cfg)
priorbox = PriorBox(anchors(cfg.model))

with torch.no_grad():
    priors = priorbox.forward()
    if cfg.train_cfg.cuda:
        priors = priors.cuda()

if __name__ == '__main__':
    net.train()
    epoch = args.resume_epoch
    print_info('===> Loading Dataset...', ['yellow', 'bold'])
    dataset = get_dataloader(cfg, args.dataset, 'train_sets')
    epoch_size = len(dataset) // (cfg.train_cfg.per_batch_size * args.ngpu)
    max_iter = cfg.train_cfg.step_lr[-1] + 1

    stepvalues = cfg.train_cfg.step_lr
コード例 #20
0
ファイル: train_voc.py プロジェクト: jinfagang/dcmnet
def train():
    """Train an RFB-Net variant on VOC or COCO, driven by module-level `args`.

    Selects the model builder and anchor config from ``args.version`` /
    ``args.size``, initializes or resumes weights, then runs the
    iteration-based SGD training loop, saving periodic and final snapshots
    into ``args.save_folder``.
    """
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    # Dataset splits and anchor presets depend on dataset + input size.
    if args.dataset == 'VOC':
        train_sets = [('2007', 'trainval'), ('2012', 'trainval')]
        cfg = (VOC_300, VOC_512)[args.size == '512']
    else:
        train_sets = [('2014', 'train'), ('2014', 'valminusminival')]
        cfg = (COCO_300, COCO_512)[args.size == '512']

    if args.version == 'RFB_vgg':
        from models.RFB_Net_vgg import build_net
    elif args.version == 'RFB_E_vgg':
        from models.RFB_Net_E_vgg import build_net
    elif args.version == 'RFB_d2':
        from models.RFB_Net_vgg_d2 import build_net
    elif args.version == 'RFB_d3':
        from models.RFB_Net_vgg_d3 import build_net
    elif args.version == 'RFB_d4':
        from models.RFB_Net_vgg_d4 import build_net
    elif args.version == 'RFB_d4_fpn':
        from models.RFB_Net_vgg_d4_fpn import build_net
    elif args.version == 'RFB_mobile':
        from models.RFB_Net_mobile import build_net
        cfg = COCO_mobile_300  # mobile backbone has its own preset
    else:
        print('Unkown version!')
    logging.info('build model version: {}'.format(args.version))

    img_dim = (300, 512)[args.size == '512']
    # Per-channel pixel means / augmentation probability differ for mobile.
    rgb_means = ((104, 117, 123), (103.94, 116.78,
                                   123.68))[args.version == 'RFB_mobile']
    p = (0.6, 0.2)[args.version == 'RFB_mobile']
    # 738:6 classes ; 2392:7 ; 8718:6
    num_classes = (21, 81)[args.dataset == 'COCO']
    logging.info('dataset number of classes: {}'.format(num_classes))
    batch_size = args.batch_size
    weight_decay = 0.0005
    gamma = 0.1
    momentum = 0.9

    net = build_net('train', img_dim, num_classes)
    # print(net)
    # NOTE(review): `args.resume_net is None` is the idiomatic comparison.
    if args.resume_net == None:
        # Fresh start: load backbone weights, then init the new layers.
        base_weights = torch.load(args.basenet)
        # NOTE(review): this OrderedDict import is unused in this branch.
        from collections import OrderedDict
        print('Loading base network...')
        net.base.load_state_dict(base_weights)

        # NOTE(review): `xavier` is defined but never used below, and
        # init.xavier_uniform is deprecated in favour of xavier_uniform_.
        def xavier(param):
            init.xavier_uniform(param)

        def weights_init(m):
            # Kaiming-init conv weights; BN weights -> 1, all biases -> 0.
            for key in m.state_dict():
                if key.split('.')[-1] == 'weight':
                    if 'conv' in key:
                        init.kaiming_normal_(m.state_dict()[key],
                                             mode='fan_out')
                    if 'bn' in key:
                        m.state_dict()[key][...] = 1
                elif key.split('.')[-1] == 'bias':
                    m.state_dict()[key][...] = 0

        print('Initializing weights...')
        # initialize newly added layers' weights with kaiming_normal method
        net.extras.apply(weights_init)
        net.loc.apply(weights_init)
        net.conf.apply(weights_init)
        net.Norm.apply(weights_init)
        if args.version == 'RFB_E_vgg':
            net.reduce.apply(weights_init)
            net.up_reduce.apply(weights_init)
    else:
        # load resume network
        print('Loading resume network...')
        state_dict = torch.load(args.resume_net)
        # create new OrderedDict that does not contain `module.`
        # (DataParallel checkpoints prefix every key with 'module.')
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)

    if args.ngpu > 1:
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.cuda:
        net.cuda()
        cudnn.benchmark = True

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    # optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
    #                      momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False)
    # Anchor boxes are fixed; compute them once up front.
    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward()
        if args.cuda:
            priors = priors.cuda()

    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch

    logging.info('Loading Dataset: {}'.format(args.dataset))
    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets,
                               preproc(img_dim, rgb_means, p),
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets,
                                preproc(img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones (in iterations), dataset-specific.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', dataset.name)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    lr = args.lr
    # Iteration-based training loop (epochs derived from epoch_size).
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(
                data.DataLoader(dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=args.num_workers,
                                collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            # Snapshot every 10 epochs (every 5 once past epoch 200).
            # NOTE(review): this path concatenation has no '/' separator —
            # it only works if args.save_folder ends with one; the final
            # save below uses os.path.join instead. Confirm intended.
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0
                                                   and epoch > 200):
                torch.save(
                    net.state_dict(), args.save_folder + args.version + '_' +
                    args.dataset + '_epoches_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))
        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]
        # forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()
        # NOTE(review): 'Totel' below is a typo in a runtime string; left
        # unchanged here to preserve log-parsing compatibility.
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '|| Totel iter ' + repr(iteration) +
                  ' || L: %.4f C: %.4f||' % (loss_l.item(), loss_c.item()) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                  'LR: %.8f' % (lr))
    torch.save(
        net.state_dict(),
        os.path.join(args.save_folder,
                     'Final_' + args.version + '_' + args.dataset + '.pth'))
コード例 #21
0
from layers.bbox_utils import match, match_ssd, decode

import matplotlib.pyplot as plt

# Evaluation split of the WIDER FACE dataset (train=False disables
# training-time augmentation); `cfg` is assumed to come from module scope.
dataset = WIDERDetection(cfg.TRAIN_FILE,
                         transform=S3FDValTransform(cfg.INPUT_SIZE),
                         train=False)

# Fixed-order loader (shuffle=False) so anchor-matching stats are repeatable.
data_loader = data.DataLoader(dataset,
                              64,
                              num_workers=4,
                              shuffle=False,
                              collate_fn=detection_collate,
                              pin_memory=True)

# Pre-compute the full set of S3FD anchor boxes once.
anchor_boxes = PriorBox(cfg).forward()
num_priors = anchor_boxes.size(0)
variance = cfg.VARIANCE

# Scratch directory for the cached matching results.
savepath = 'tmp'
if not os.path.exists(savepath):
    os.makedirs(savepath)

filename = os.path.join(savepath, 'match_anchor.pkl')


def anchor_match_count():
    anchor_scale_map = {16: 0, 32: 0, 64: 0, 128: 0, 256: 0, 512: 0}
    thresh = cfg.OVERLAP_THRESH
    sfd_scales = []
    for idx, (_, target) in enumerate(data_loader):
コード例 #22
0
def test(img_path, model_path='weights/RFB_vgg_COCO_30.3.pth'):
    """Run single-image COCO detection with an RFB-Net checkpoint and show it.

    Args:
        img_path: path of the image to run detection on.
        model_path: trained checkpoint; a path containing 'mobile' selects
            the MobileNet backbone/config, otherwise the VGG-300 one.
    """
    trained_model = model_path
    # BUGFIX: the original only assigned `cuda` when CUDA was available,
    # leaving it unbound (NameError) on CPU-only machines.
    cuda = torch.cuda.is_available()
    if 'mobile' in model_path:
        cfg = COCO_mobile_300
    else:
        cfg = COCO_300

    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward()
        if cuda:
            priors = priors.cuda()
    numclass = 81  # 80 COCO classes + background

    img = cv2.imread(img_path)
    # Scale factors mapping normalized box coords back to pixel coords.
    scale = torch.Tensor(
        [img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
    if 'mobile' in model_path:
        net = build_rfb_mobilenet('test', 300, numclass)  # initialize detector
    else:
        net = build_rfb_vgg_net('test', 300, numclass)  # initialize detector

    transform = BaseTransform(net.size, (123, 117, 104), (2, 0, 1))
    with torch.no_grad():
        x = transform(img).unsqueeze(0)
        x = Variable(x)
        if cuda:
            x = x.cuda()
            scale = scale.cuda()
    state_dict = torch.load(trained_model)['state_dict']
    # create new OrderedDict that does not contain `module.`
    # (DataParallel checkpoints prefix every key with 'module.')
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    net.eval()
    if cuda:
        net = net.cuda()
        cudnn.benchmark = True
    else:
        net = net.cpu()
    print('Finished loading model!')
    detector = Detect(numclass, 0, cfg)

    tic = time.time()
    out = net(x)  # forward pass

    boxes, scores = detector.forward(out, priors)
    print('Finished in {}'.format(time.time() - tic))
    boxes = boxes[0]
    scores = scores[0]
    boxes *= scale  # scale each detection back up to the image
    boxes = boxes.cpu().numpy()
    scores = scores.cpu().numpy()
    # Draw every class-j detection surviving the 0.6 confidence cut + NMS.
    for j in range(1, numclass):
        inds = np.where(scores[:, j] > 0.6)[0]
        # BUGFIX: np.where never returns None, so the original
        # `if inds is None` could not skip classes with no detections.
        if len(inds) == 0:
            continue
        c_bboxes = boxes[inds]
        c_scores = scores[inds, j]
        c_dets = np.hstack((c_bboxes, c_scores[:,
                                               np.newaxis])).astype(np.float32,
                                                                    copy=False)
        keep = nms(c_dets, 0.6)
        c_dets = c_dets[keep, :]
        c_bboxes = c_dets[:, :4]

        if c_bboxes.shape[0] != 0:
            print('{}: {}'.format(j, c_bboxes))
            for box in c_bboxes:
                # BUGFIX: OpenCV drawing APIs require integer pixel coords;
                # the detector returns floats.
                x1, y1, x2, y2 = (int(round(float(c))) for c in box)
                cv2.rectangle(img, (x1, y1), (x2, y2),
                              (0, 255, 0), 1, 0)
                cv2.putText(img, '{}'.format(j), (x1, y1),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                            cv2.LINE_AA)
    cv2.imshow('rr', img)
    cv2.waitKey(0)
コード例 #23
0
ファイル: multi_eval.py プロジェクト: xychen9459/TDRN
def test_net(save_folder, net, dataset, transform, top_k, detector, priors):
    """Test a Fast R-CNN network on an image database.

    Runs multi-scale, horizontal-flip test-time augmentation: each image is
    evaluated at every scale in ``multi_scale[str(ssd_dim)]`` (and its mirror),
    per-scale detections are filtered by box size, merged, fused with
    ``bbox_vote``, and finally pickled and scored via
    ``evaluate_detections``.

    Relies on module-level globals: ``args``, ``ssd_dim``, ``multi_scale``,
    ``multi_cfg``, ``dataset_mean``, ``device``, ``pkl_dir``, ``labelmap``.

    NOTE(review): the ``priors`` parameter is shadowed by the per-scale
    priors inside the loop, so the passed-in value is never used — confirm
    it can be dropped from the signature.
    """
    num_images = len(dataset)
    # all detections are collected into:score
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(len(labelmap) + 1)]

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}
    all_time = 0.
    output_dir = get_output_dir(
        pkl_dir,
        args.iteration + '_' + args.dataset_name + '_' + args.set_file_name)
    det_file = os.path.join(output_dir, 'detections.pkl')
    output_dir = get_output_dir(output_dir, 'multi_test')
    ######################### Multiscale PriorBox #####################
    # Pre-compute anchor boxes once per test scale.
    priorboxes = {}
    for v1 in multi_scale[str(ssd_dim)]:
        if not multi_cfg[str(v1)]:
            return ("not included this multi_scale")
        priorbox = PriorBox(multi_cfg[str(v1)])
        img_size = multi_cfg[str(v1)]['min_dim']
        with torch.no_grad():
            priorboxes[str(img_size)] = priorbox.forward().to(device)
    ########################## Detection ##############################
    for i in range(num_images):
        _t['im_detect'].tic()
        image = dataset.pull_image(i)
        h, w, _ = image.shape
        # Raw detections per (base_dim, scale, flip) combination.
        detections_multi = {}
        for v in multi_scale[str(ssd_dim)]:
            priors = priorboxes[str(v)]
            ssd_dim_temp = int(v)
            # loop 0: original image; loop 1: horizontal flip.
            for loop in range(2):
                if (loop == 0):
                    im_trans = base_transform(image, ssd_dim_temp,
                                              dataset_mean)
                    im_trans = im_trans[:, :, (2, 1, 0)]  # BGR -> RGB
                else:
                    im_f = image.copy()
                    im_f = cv2.flip(im_f, 1)
                    im_trans = base_transform(im_f, ssd_dim_temp, dataset_mean)
                    im_trans = im_trans[:, :, (2, 1, 0)]  # BGR -> RGB
                with torch.no_grad():
                    x = torch.from_numpy(im_trans).unsqueeze(0).permute(
                        0, 3, 1, 2).to(device)
                    # RefineDet variants additionally feed ARM outputs
                    # into the detector for anchor refinement.
                    if 'RefineDet' in args.backbone and args.refine:
                        arm_loc, _, loc, conf = net(x)
                        detections = detector.forward(loc,
                                                      conf,
                                                      priors,
                                                      arm_loc_data=arm_loc)
                        detections_multi[str(ssd_dim) + '_' + str(v) + '_' +
                                         str(loop)] = detections.clone()
                    else:
                        loc, conf = net(x)
                        arm_loc = None
                        detections = detector.forward(loc,
                                                      conf,
                                                      priors,
                                                      arm_loc_data=arm_loc)
                        detections_multi[str(ssd_dim) + '_' + str(v) + '_' +
                                         str(loop)] = detections.clone()
        detect_time = _t['im_detect'].toc(average=False)
        # Skip the first 10 images when accumulating FPS (warm-up).
        if i > 10:
            all_time += detect_time
    ###################################################################
        # Merge detections from every scale/flip, per class j.
        for j in range(1, detections.size(1)):
            cls_dets = np.array([])
            for k, d in detections_multi.items():
                dets = d[0, j, :]
                if dets.sum() == 0:
                    continue
                # Keep rows with positive score; dets columns are
                # (score, x1, y1, x2, y2[, extra]).
                mask = dets[:, 0].gt(0.).expand(dets.size(-1),
                                                dets.size(0)).t()
                dets = torch.masked_select(dets, mask).view(-1, dets.size(-1))
                boxes = dets[:, 1:-1] if dets.size(-1) == 6 else dets[:, 1:]
                # Mirrored run (key suffix '1'): un-flip x coordinates.
                if (k[-1] == '1'):
                    boxes[:, 0] = 1 - boxes[:, 0]
                    boxes[:, 2] = 1 - boxes[:, 2]
                    temp_swap = boxes[:, 0].clone()
                    boxes[:, 0] = boxes[:, 2]
                    boxes[:, 2] = temp_swap
                # Scale normalized boxes to image pixels.
                boxes[:, 0] *= w
                boxes[:, 2] *= w
                boxes[:, 1] *= h
                boxes[:, 3] *= h
                # Per-scale size gating: small test scales keep only large
                # boxes; large test scales keep only small boxes.
                # NOTE(review): if a key matches none of these branches,
                # `index_temp` below is referenced unbound (NameError) —
                # confirm all generated keys are covered.
                if k in ['320_192_0', '320_192_1', '512_320_0', '512_320_1']:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.maximum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) > 32)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                elif k in ['320_320_0', '320_320_1', '512_512_0', '512_512_1']:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.maximum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) > 0)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                elif k in ['320_384_0', '320_384_1', '512_640_0', '512_640_1']:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.minimum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) < 160)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                elif k in ['320_448_0', '320_448_1']:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.minimum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) < 128)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                elif k in ['320_512_0', '320_512_1']:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.minimum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) < 96)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                elif k in ['320_576_0', '320_576_1']:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.minimum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) < 64)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                elif k in [
                        '320_706_0', '320_706_1', '512_1216_0', '512_1216_1'
                ]:
                    boxes_np = boxes.cpu().numpy()
                    index_temp = np.where(
                        np.minimum(boxes_np[:, 2] - boxes_np[:, 0] +
                                   1, boxes_np[:, 3] - boxes_np[:, 1] +
                                   1) < 32)[0]
                    if (not index_temp.size):
                        continue
                    else:
                        boxes = boxes[index_temp, :]
                if (index_temp.size == 0):
                    continue
                # Stack (x1,y1,x2,y2,score) rows for this scale.
                scores = dets[index_temp, 0].cpu().numpy()
                cls_dets_temp = np.hstack((boxes.cpu().numpy(), scores[:, np.newaxis])) \
                    .astype(np.float32, copy=False)
                if (cls_dets.size == 0):
                    cls_dets = cls_dets_temp.copy()
                else:
                    cls_dets = np.concatenate((cls_dets, cls_dets_temp),
                                              axis=0)
            # Fuse overlapping multi-scale detections by box voting.
            if (cls_dets.size != 0):
                cls_dets = bbox_vote(cls_dets)
                if (len(cls_dets) != 0):
                    all_boxes[j][i] = cls_dets
        print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1, num_images,
                                                    detect_time))
    FPS = (num_images - 10) / all_time
    print('FPS:', FPS)
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    evaluate_detections(all_boxes, output_dir, dataset, FPS=FPS)
コード例 #24
0
    detect_model = build_net('test', ops.img_dim,
                             ops.num_classes)  # initialize detector

    #---------------------------------------------
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # chkpt = torch.load(ops.detect_model, map_location=device)
    chkpt = torch.load(ops.detect_model,
                       map_location=lambda storage, loc: storage)
    detect_model.load_state_dict(chkpt)
    detect_model.eval()  # 设置为前向推断模式

    acc_model(ops, detect_model)
    detect_model = detect_model.to(device)

    detector = Detect(ops.num_classes, 0, cfg)  #  num_classes, bkg_label, cfg
    priorbox = PriorBox(cfg, debug_=False)
    with torch.no_grad():
        priors = priorbox.forward()
        if use_cuda:
            priors = priors.cuda()

    video_capture = cv2.VideoCapture(ops.test_path)
    ret, img_raw = video_capture.read()
    if ret:
        # scale = torch.Tensor([img_raw.shape[1], img_raw.shape[0],img_raw.shape[1], img_raw.shape[0]])
        scale = [
            img_raw.shape[1], img_raw.shape[0], img_raw.shape[1],
            img_raw.shape[0]
        ]
        # if use_cuda:
        #     scale = scale.cuda()
コード例 #25
0
def demo(v_f):
    """Run M2Det detection on a video file and write annotated frames to result.mp4.

    Args:
        v_f: Path (or device index string) accepted by cv2.VideoCapture.

    Relies on module-level globals: config_f, checkpoint_path, device, classes.
    """
    # Build the detector from the config file and load trained weights.
    cfg = Config.fromfile(config_f)
    anchor_config = anchors(cfg)
    priorbox = PriorBox(anchor_config)
    net = build_net('test',
                    size=cfg.model.input_size,
                    config=cfg.model.m2det_config)
    init_net(net, cfg, checkpoint_path)
    net.eval().to(device)
    with torch.no_grad():
        priors = priorbox.forward().to(device)
    # (2, 0, 1) permutes HWC -> CHW for the network input.
    _preprocess = BaseTransform(
        cfg.model.input_size, cfg.model.rgb_means, (2, 0, 1))
    detector = Detect(cfg.model.m2det_config.num_classes,
                      cfg.loss.bkg_label, anchor_config)
    logging.info('detector initiated.')

    cap = cv2.VideoCapture(v_f)
    logging.info('detect on: {}'.format(v_f))
    logging.info('video width: {}, height: {}'.format(int(cap.get(3)), int(cap.get(4))))
    # NOTE(review): MJPG fourcc with a .mp4 container is an odd pairing — some
    # OpenCV builds will refuse to open this writer; confirm it works locally.
    out_video = cv2.VideoWriter("result.mp4", cv2.VideoWriter_fourcc(*'MJPG'), 24, (int(cap.get(3)), int(cap.get(4))))

    while True:
        ret, image = cap.read()
        if not ret:
            # End of stream: release everything and stop.
            out_video.release()
            cv2.destroyAllWindows()
            cap.release()
            break
        w, h = image.shape[1], image.shape[0]
        img = _preprocess(image).unsqueeze(0).to(device)
        # Scale factors to map normalized box coords back to pixel coords.
        scale = torch.Tensor([w, h, w, h])
        out = net(img)
        boxes, scores = detector.forward(out, priors)
        boxes = (boxes[0]*scale).cpu().numpy()
        scores = scores[0].cpu().numpy()
        allboxes = []
        # Per-class thresholding + NMS (class 0 is background, skipped).
        for j in range(1, cfg.model.m2det_config.num_classes):
            inds = np.where(scores[:, j] > cfg.test_cfg.score_threshold)[0]
            if len(inds) == 0:
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(
                np.float32, copy=False)
            soft_nms = cfg.test_cfg.soft_nms
            # min_thresh, device_id=0 if cfg.test_cfg.cuda else None)
            keep = nms(c_dets, cfg.test_cfg.iou, force_cpu=soft_nms)
            keep = keep[:cfg.test_cfg.keep_per_class]
            c_dets = c_dets[keep, :]
            allboxes.extend([_.tolist()+[j] for _ in c_dets])
        if len(allboxes) > 0:
            allboxes = np.array(allboxes)
            # [boxes, scores, label_id] -> [id, score, boxes] 0, 1, 2, 3, 4, 5
            allboxes = allboxes[:, [5, 4, 0, 1, 2, 3]]
            logging.info('allboxes shape: {}'.format(allboxes.shape))
            res = visualize_det_cv2(image, allboxes, classes=classes, thresh=0.2)
            # res = visualize_det_cv2_fancy(image, allboxes, classes=classes, thresh=0.2, r=4, d=6)
            cv2.imshow('rr', res)
            # NOTE(review): frames with no detections are never written to the
            # output video, so result.mp4 drops those frames — confirm intended.
            out_video.write(res)
            cv2.waitKey(1)
コード例 #26
0
# Move the network to GPU and enable cuDNN autotuning when requested.
if args.cuda:
    net.cuda()
    cudnn.benchmark = True

# SGD optimization strategy.
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)
#optimizer = optim.RMSprop(net.parameters(), lr=args.lr,alpha = 0.9, eps=1e-08,
#                      momentum=args.momentum, weight_decay=args.weight_decay)

# Loss reused from SSD's MultiBoxLoss; see multibox_loss.py for details.
criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5,
                         False)
# Prior (anchor) boxes over the feature pyramid; see prior_box.py.
priorbox = PriorBox(cfg)
with torch.no_grad():
    priors = priorbox.forward()
    if args.cuda:
        priors = priors.cuda()


def train():
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch  # finetune方式地训练
    print('Loading Dataset...')
    # 加载训练、验证集,preproc类可以参照data_augment.py函数,与SSD数据增强方式一致
    if args.dataset == 'VOC':
コード例 #27
0
# ---------------------------------------------------------------------------
# Module-level training setup: logger, config, network (optionally resumed and
# wrapped for multi-GPU), optimizer, loss criterion, and prior (anchor) boxes.
# ---------------------------------------------------------------------------
logger = set_logger(args.tensorboard)
global cfg
cfg = Config.fromfile(args.config)
net = get_network(build_net, cfg, args.dataset)
init_net(net, cfg, args.resume_net)  # init the network with pretrained
# weights or resumed weights

if args.ngpu > 1:
    net = torch.nn.DataParallel(net)
if cfg.train_cfg.cuda:
    net.cuda()
    # BUG FIX: was `cudnn.benckmark = True` (typo), which silently set a
    # useless attribute instead of enabling cuDNN autotuning.
    cudnn.benchmark = True

optimizer = set_optimizer(net, cfg)
criterion = set_criterion(cfg, args.dataset)
priorbox = PriorBox(anchors(cfg.model, args.dataset))

# Priors are fixed for the whole run, so compute them once without autograd.
with torch.no_grad():
    priors = priorbox.forward()
    if cfg.train_cfg.cuda:
        priors = priors.cuda()

if __name__ == '__main__':
    net.train()
    epoch = args.resume_epoch
    print_info('===> Loading Dataset...', ['yellow', 'bold'])
    dataset = get_dataloader(cfg, args.dataset, 'train_sets')
    epoch_size = len(dataset) // (cfg.train_cfg.per_batch_size * args.ngpu)
    max_iter = getattr(cfg.train_cfg.step_lr, args.dataset)[-1] * epoch_size
    stepvalues = [
        _ * epoch_size
コード例 #28
0
    def Model_Params(self, model_dir="output", use_gpu=True):
        '''
        User Function - Set Model Params

        Loads the trained Pelee detector (net, priors, detector, preprocessing
        transform, class labels) into self.system_dict for later inference.

        Args:
            model_dir (str): Directory holding config_final.py and the trained
                             weights; select the right one as per training
            use_gpu (bool): If True use GPU else run on CPU

        Returns:
            None
        '''
        # Derive an inference config from the training config, forcing CPU
        # mode when requested, and save it alongside the original.
        with open(model_dir + "/config_final.py", 'r') as f:
            lines = f.read()

        if not use_gpu:
            lines = lines.replace("cuda=True", "cuda=False")

        with open(model_dir + "/config_test.py", 'w') as f:
            f.write(lines)

        print("Loading model for inference")
        self.system_dict["cfg"] = Config.fromfile(model_dir + "/config_test.py")
        anchor_config = anchors(self.system_dict["cfg"].model)
        self.system_dict["priorbox"] = PriorBox(anchor_config)
        self.system_dict["net"] = build_net('test', self.system_dict["cfg"].model.input_size, self.system_dict["cfg"].model)
        init_net(self.system_dict["net"], self.system_dict["cfg"], model_dir + "/VOC/Final_Pelee_VOC_size304.pth")
        print_info('===> Finished constructing and loading model', ['yellow', 'bold'])
        self.system_dict["net"].eval()

        # Priors are fixed; compute once, then place net/priors on the device
        # the (possibly rewritten) config asks for.
        with torch.no_grad():
            self.system_dict["priors"] = self.system_dict["priorbox"].forward()
            if self.system_dict["cfg"].test_cfg.cuda:
                self.system_dict["net"] = self.system_dict["net"].cuda()
                self.system_dict["priors"] = self.system_dict["priors"].cuda()
                cudnn.benchmark = True
            else:
                self.system_dict["net"] = self.system_dict["net"].cpu()
        # (2, 0, 1) permutes HWC -> CHW for the network input.
        self.system_dict["_preprocess"] = BaseTransform(self.system_dict["cfg"].model.input_size,
                                        self.system_dict["cfg"].model.rgb_means, (2, 0, 1))
        self.system_dict["num_classes"] = self.system_dict["cfg"].model.num_classes
        self.system_dict["detector"] = Detect(self.system_dict["num_classes"],
                                                self.system_dict["cfg"].loss.bkg_label, anchor_config)

        print("Done....")


        print("Loading other params")
        # One distinct color per class for visualization.
        base = int(np.ceil(pow(self.system_dict["num_classes"], 1. / 3)))
        self.system_dict["colors"] = [self._to_color(x, base)
                  for x in range(self.system_dict["num_classes"])]
        cats = ['__background__']
        with open(self.system_dict["class_list"]) as f:
            class_lines = f.readlines()
        for line in class_lines:
            # BUG FIX: original tested `lines != ""` (the whole list, which is
            # always truthy) instead of the individual line, and stripped the
            # last character even when the final line had no trailing newline.
            name = line.rstrip("\n")
            if name != "":
                cats.append(name)
        self.system_dict["labels"] = cats
        print("Done....")
コード例 #29
0
parser.add_argument('--show',
                    action='store_true',
                    help='Whether to display the images')
args = parser.parse_args()

print_info(
    ' ----------------------------------------------------------------------\n'
    '|                       M2Det Demo Program                             |\n'
    ' ----------------------------------------------------------------------',
    ['yellow', 'bold'])

# Build the M2Det test-mode network from the config and load trained weights.
global cfg
cfg = Config.fromfile(args.config)
anchor_config = anchors(cfg)
print_info('The Anchor info: \n{}'.format(anchor_config))
priorbox = PriorBox(anchor_config)
net = build_net('test',
                size=cfg.model.input_size,
                config=cfg.model.m2det_config)
init_net(net, cfg, args.trained_model)
print_info('===> Finished constructing and loading model', ['yellow', 'bold'])
net.eval()
# Priors are fixed; compute once, then place net/priors per the config.
with torch.no_grad():
    priors = priorbox.forward()
    if cfg.test_cfg.cuda:
        net = net.cuda()
        priors = priors.cuda()
        cudnn.benchmark = True
    else:
        net = net.cpu()
_preprocess = BaseTransform(cfg.model.input_size, cfg.model.rgb_means,
コード例 #30
0
def train():
    """Train an RFB-Net detector on VOC or COCO.

    Parses command-line args, builds the dataset, model, optimizer and loss,
    then runs the SGD training loop: checkpoints periodically, logs every 10
    iterations, and saves a final model (also on KeyboardInterrupt).
    """
    args = parse_args()
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    # Dataset splits and size-specific anchor config.
    if args.dataset == 'VOC':
        train_sets = [('2007', 'trainval'), ('2012', 'trainval')]
        cfg = (VOC_300, VOC_512)[args.size == '512']
    else:
        train_sets = [('2017', 'train'), ('2014', 'val')]
        cfg = (COCO_300, COCO_512)[args.size == '512']

    # BUG FIX: the original imported a different builder per version but then
    # unconditionally called build_rfb_mobilenet; bind the chosen builder to a
    # single name here. (Assumes all builders share the ('train', img_dim,
    # num_classes) signature — confirm against the models package.)
    if args.version == 'RFB_vgg':
        from models.RFB_Net_vgg import build_rfb_vgg_net as build_fn
    elif args.version == 'RFB_E_vgg':
        from models.RFB_Net_E_vgg import build_net as build_fn
    elif args.version == 'RFB_mobile':
        from models.RFB_Net_mobile import build_rfb_mobilenet as build_fn
        cfg = COCO_mobile_300
    else:
        print('Unknown version!')
        return  # BUG FIX: original fell through and crashed later

    img_dim = (300, 512)[args.size == '512']
    rgb_means = ((104, 117, 123), (103.94, 116.78, 123.68))[args.version == 'RFB_mobile']
    p = (0.6, 0.2)[args.version == 'RFB_mobile']
    num_classes = (21, 81)[args.dataset == 'COCO']
    batch_size = args.batch_size

    print('Loading Dataset...')
    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets, preproc(img_dim, rgb_means, p), AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets, preproc(img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    net = build_fn('train', img_dim, num_classes)
    if args.resume_net:
        # Resume: strip the optional DataParallel 'module.' prefix from keys.
        print('Loading resume network...')
        state_dict = torch.load(args.resume_net)
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] if k[:7] == 'module.' else k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)
        print('Resume model load done.')
    else:
        # Fresh run: only the backbone gets pretrained weights.
        base_weights = torch.load(args.basenet)
        print('Loading base network...')
        net.base.load_state_dict(base_weights)
        print('Base weights load done.')
    net.to(device)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(num_classes, 0.5, True, 0, True, 3, 0.5, False).to(device)
    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward()
        # BUG FIX: Tensor.to() is not in-place; the original discarded the
        # result, leaving priors on the CPU when training on GPU.
        priors = priors.to(device)

    net.train()
    # Per-epoch loss accumulators.
    loc_loss = 0
    conf_loss = 0
    epoch = 0 + args.resume_epoch
    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones (in iterations) per dataset.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', dataset.name)
    step_index = 0

    start_iter = args.resume_epoch * epoch_size if args.resume_epoch > 0 else 0

    lr = args.lr
    try:
        for iteration in range(start_iter, max_iter):
            if iteration % epoch_size == 0:
                # New epoch: fresh batch iterator, reset counters, checkpoint.
                batch_iterator = iter(data.DataLoader(dataset, batch_size,
                                                      shuffle=True, num_workers=args.num_workers,
                                                      collate_fn=detection_collate))
                loc_loss = 0
                conf_loss = 0
                if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                    torch.save(net.state_dict(), args.save_folder + args.version + '_' + args.dataset + '_epoches_' +
                               repr(epoch) + '.pth')
                epoch += 1

            load_t0 = time.time()
            if iteration in stepvalues:
                step_index += 1
            lr = adjust_learning_rate(args, optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

            # Load one training batch and move it to the device.
            # (Removed per-iteration debug prints of the full batch tensors.)
            images, targets = next(batch_iterator)
            images = Variable(images.to(device))
            targets = [Variable(anno.to(device)) for anno in targets]

            # Forward / backward / update.
            t0 = time.time()
            out = net(images)
            optimizer.zero_grad()
            loss_l, loss_c = criterion(out, priors, targets)
            loss = loss_l + loss_c
            loss.backward()
            optimizer.step()
            t1 = time.time()
            loc_loss += loss_l.item()
            conf_loss += loss_c.item()
            load_t1 = time.time()
            if iteration % 10 == 0:
                print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                      + '|| Totel iter ' +
                      repr(iteration) + ' || L: %.4f C: %.4f||' % (
                          loss_l.item(), loss_c.item()) +
                      'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))
    except KeyboardInterrupt:
        # Save progress before exiting on Ctrl-C.
        print('Interrupted, try saving model...')
        torch.save(net.state_dict(), args.save_folder +
                   'Final_' + args.version + '_' + args.dataset + '.pth')
    torch.save(net.state_dict(), args.save_folder +
               'Final_' + args.version + '_' + args.dataset + '.pth')