def DatasetSync(dataset='VOC', split='training'):
    """Build and return a detection dataset for the given dataset name.

    Args:
        dataset: one of 'VOC', 'kitti', 'COCO', 'tme'.
        split: split name; only used by the kitti loader.

    Returns:
        The constructed dataset object.

    Raises:
        ValueError: if ``dataset`` is not a recognized name (previously an
            unknown name silently fell through and the input string itself
            was returned).
    """
    if dataset == 'VOC':
        train_sets = [('2007', 'trainval'), ('2012', 'trainval')]
        # DataRoot=os.path.join(args.data_root,'VOCdevkit')
        DataRoot = args.data_root
        dataset = VOCDetection(DataRoot, train_sets,
                               SSDAugmentation(args.dim, means),
                               AnnotationTransform())
    elif dataset == 'kitti':
        DataRoot = os.path.join(args.data_root, 'kitti')
        dataset = KittiLoader(DataRoot,
                              split=split,
                              img_size=(1000, 300),
                              transforms=SSDAugmentation((1000, 300), means),
                              target_transform=AnnotationTransform_kitti())
    elif dataset == 'COCO':
        # Note: COCODetection below takes no image-set argument; the original
        # assigned `image_set` twice and never used it (dead code, removed).
        DataRoot = COCO_ROOT
        dataset = COCODetection(root=DataRoot,
                                transform=SSDAugmentation(args.dim, means))
    elif dataset == 'tme':
        train_sets = [('train_mix_cut_bot')]
        DataRoot = '/home/kiminhan/datasets/'
        dataset = TMEDetection(DataRoot, train_sets,
                               SSDAugmentation(args.dim, means),
                               AnnotationTransform())
    else:
        raise ValueError('Unknown dataset: {}'.format(dataset))

    return dataset
Exemple #2
0
def run_evaluation(input_dim, net_name, saved_model_name, skip=0):
    """Load a trained detector from disk and evaluate it on the dataset
    selected by the module-level ``DATASET_NAME``.

    Args:
        input_dim: input image size the net was trained with.
        net_name: architecture name understood by get_config / get_net.
        saved_model_name: path to the saved state_dict file.
        skip: frame-skip parameter for datasets that support it.

    Returns:
        (mean_ap, mam) tuple as produced by ``test_net``.

    Raises:
        ValueError: if ``DATASET_NAME`` is not one of the supported names
            (previously `dataset` was left undefined and the function
            crashed later with a NameError).
    """
    num_classes = len(CLASSES) + 1  # +1 background
    cfg = get_config(net_name + str(input_dim))
    net_class = get_net(net_name)
    net = net_class(input_dim, 'test', num_classes, cfg)  # initialize SSD
    net.load_state_dict(torch.load(saved_model_name))
    net.eval()
    print('Finished loading model!')
    # load data
    if DATASET_NAME == 'KAIST':
        dataset = GetDataset(args.voc_root, BaseTransform(input_dim, dataset_mean), AnnotationTransform(), dataset_name='test20', skip=skip)
    elif DATASET_NAME == 'VOC0712':
        dataset = GetDataset(args.voc_root, BaseTransform(input_dim, dataset_mean), AnnotationTransform(), [('2007', 'test')])
    elif DATASET_NAME == 'Sensiac':
        dataset = GetDataset(args.voc_root, BaseTransform(input_dim, dataset_mean), AnnotationTransform(), dataset_name='day_test10')
    elif DATASET_NAME == 'Caltech':
        dataset = GetDataset(args.voc_root, BaseTransform(input_dim, dataset_mean), AnnotationTransform(), dataset_name='test01', skip=skip)
    else:
        raise ValueError('Unsupported DATASET_NAME: {}'.format(DATASET_NAME))
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation (renamed from `map` to avoid shadowing the builtin)
    mean_ap, mam = test_net(args.save_folder, net, args.cuda, dataset,
                            BaseTransform(net.size, dataset_mean), args.top_k, input_dim,
                            thresh=args.confidence_threshold)
    return mean_ap, mam
Exemple #3
0
def test_model(trained_model):
    """Load a trained detector checkpoint (handling DataParallel-style key
    prefixes) and run evaluation on the dataset chosen by ``args.dataset``.

    Args:
        trained_model: path to the saved state_dict file.

    Raises:
        ValueError: if ``args.dataset`` is not VOC / VOC2012 / COCO
            (previously this branch only printed a warning, then crashed on
            the undefined ``testset``).
    """
    # load net
    img_dim = (300, 512)[args.size == '512']
    num_classes = (21, 81)[args.dataset == 'COCO']
    net = build_net('test', img_dim, num_classes)  # initialize detector
    state_dict = torch.load(trained_model)

    # Checkpoints saved from nn.DataParallel prefix every key with
    # 'module.'; strip it so the keys match a bare (single-GPU) model.
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:] if k[:7] == 'module.' else k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')
    # load data
    if args.dataset == 'VOC':
        testset = VOCDetection(VOCroot, [('2007', 'test')], None,
                               AnnotationTransform())
    elif args.dataset == 'VOC2012':
        testset = VOCDetection(VOCroot, [('2012', 'test')], None,
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        testset = COCODetection(COCOroot, [('2014', 'minival')], None)
        # COCOroot, [('2015', 'test-dev')], None)
    else:
        raise ValueError('Only VOC and COCO dataset are supported now!')
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    else:
        net = net.cpu()

    # evaluation
    top_k = 200
    detector = Detect(num_classes, 0, cfg)
    save_folder = os.path.join(args.save_folder, args.dataset)
    # RFB_mobile uses ImageNet-style means; all other versions use VOC means
    rgb_means = ((104, 117, 123), (103.94, 116.78,
                                   123.68))[args.version == 'RFB_mobile']
    test_net(save_folder,
             net,
             detector,
             args.cuda,
             testset,
             BaseTransform(net.size, rgb_means, (2, 0, 1)),
             top_k,
             thresh=0.01)
Exemple #4
0
def main(args):
    """Evaluate an SSD300 checkpoint on the VOC2007 test split."""
    img_dim = 300
    use_voc_07_ap_metric = True

    dataset = VOCDetection(args.data_root, [('2007', 'test')],
                           BaseTransform(img_dim, (104, 117, 123)),
                           AnnotationTransform())
    print('Using data iterator "{}"'.format(dataset.__class__.__name__))
    num_classes = dataset.num_classes()

    # Build the net in test mode and restore the trained weights.
    net = build_ssd('test', img_dim, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    cuda_state = 'Using' if args.cuda else 'No'
    print('Finished loading model! {} Cuda'.format(cuda_state))

    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True

    eval_ssd(dataset,
             net,
             args.save_path,
             cuda=args.cuda,
             use_voc_07=use_voc_07_ap_metric)
def main():
    """Evaluate saved SSD checkpoints (one per iteration listed in
    ``args.eval_iter``) on UCF24 and log per-class APs plus mean AP.

    Side effects: mutates ``args.save_root`` / ``args.data_root`` / ``args.listid``
    (appends the dataset name) and writes one testing-<iter>.log per checkpoint.
    """
    means = (104, 117, 123)  # only support voc now

    exp_name = 'CONV-SSD-{}-{}-bs-{}-lr-{:05d}'.format(args.dataset,
                                                       args.input_type,
                                                       args.batch_size,
                                                       int(args.lr * 100000))

    args.save_root += args.dataset + '/'
    args.data_root += args.dataset + '/'
    args.listid = '01'  ## would be usefull in JHMDB-21
    print('Exp name', exp_name, args.listid)
    for iteration in [int(itr) for itr in args.eval_iter.split(',')]:
        # Line-buffered log; `with` guarantees the handle is closed even when
        # evaluation raises (the original leaked the handle on error).
        with open(args.save_root + 'cache/' + exp_name +
                  "/testing-{:d}.log".format(iteration), "w", 1) as log_file:
            log_file.write(exp_name + '\n')
            trained_model_path = args.save_root + 'cache/' + exp_name + '/ssd300_ucf24_' + repr(
                iteration) + '.pth'
            log_file.write(trained_model_path + '\n')
            num_classes = len(CLASSES) + 1  #7 +1 background
            net = build_ssd("train", 300, num_classes)  # initialize SSD
            # Strip the 'module.' prefix left by DataParallel checkpoints.
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            vgg_weights = torch.load(trained_model_path)
            for k, v in vgg_weights.items():
                namekey = k[7:]  # remove `module.`
                new_state_dict[namekey] = v
            net.load_state_dict(new_state_dict)
            net = torch.nn.DataParallel(net, device_ids=[0, 1, 2, 3])
            net.eval()
            if args.cuda:
                net = net.cuda()
                cudnn.benchmark = True
            print('Finished loading model %d !' % iteration)
            # Load dataset
            dataset = UCF24Detection(args.data_root,
                                     'test',
                                     BaseTransform(args.ssd_dim, means),
                                     AnnotationTransform(),
                                     input_type=args.input_type,
                                     full_test=True)
            # evaluation
            torch.cuda.synchronize()
            tt0 = time.perf_counter()
            log_file.write('Testing net \n')
            mAP, ap_all, ap_strs = test_net(net, args.save_root, exp_name,
                                            args.input_type, dataset, iteration,
                                            num_classes)
            for ap_str in ap_strs:
                print(ap_str)
                log_file.write(ap_str + '\n')
            ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
            print(ptr_str)
            log_file.write(ptr_str)

            torch.cuda.synchronize()
            print('Complete set time {:0.2f}'.format(time.perf_counter() - tt0))
Exemple #6
0
def create_dataset(opts, phase=None):
    """Construct the dataset named by ``opts.dataset`` ('voc' or 'coco').

    ``opts.phase`` selects training augmentation vs. plain transforms; the
    optional ``phase`` argument only overrides the phase printed at the end.
    Raises NameError for unknown dataset names.
    """
    means = (104, 117, 123)
    home = os.path.expanduser("~")
    # Training uses the full augmentation pipeline; everything else uses the
    # plain resize/normalize transform.
    DataAug = SSDAugmentation if opts.phase == 'train' else BaseTransform

    name = opts.dataset
    if name == 'voc':
        print('Loading Dataset...')
        if opts.phase == 'train':
            sets = [('2007', 'trainval'), ('2012', 'trainval')]
        else:
            sets = [('2007', 'test')]
        from data import VOCDetection
        dataset = VOCDetection(os.path.join(home, "data/VOCdevkit/"), sets,
                               DataAug(opts.ssd_dim, means),
                               AnnotationTransform())
    elif name == 'coco':
        from data import COCODetection
        dataset = COCODetection(root=os.path.join(home, 'dataset/coco'),
                                phase=opts.phase,
                                transform=DataAug(opts.ssd_dim, means))
    else:
        raise NameError('Unknown dataset')

    show_phase = phase if phase is not None else opts.phase
    print('{:s} on {:s}'.format(show_phase.upper(), dataset.name))

    return dataset
Exemple #7
0
def main():
    """Run the detector over every image in ``args.images`` and save the
    annotated results (boxes drawn) into ``args.save_folder``.

    Reads CLI options via ``arg_parse`` into the module-level ``args``.
    """
    global args
    args = arg_parse()
    bgr_means = (104, 117, 123)
    dataset_name = args.dataset
    size = args.size
    top_k = args.top_k
    thresh = args.confidence_threshold
    # "refine_*" versions use the anchor-refinement (ARM) branch.
    use_refine = False
    if args.version.split("_")[0] == "refine":
        use_refine = True
    if dataset_name[0] == "V":
        cfg = cfg_dict["VOC"][args.version][str(size)]
        trainvalDataset = VOCDetection
        dataroot = VOCroot
        targetTransform = AnnotationTransform()
        valSet = datasets_dict["VOC2007"]
        classes = VOC_CLASSES
    else:
        cfg = cfg_dict["COCO"][args.version][str(size)]
        trainvalDataset = COCODetection
        dataroot = COCOroot
        targetTransform = None
        valSet = datasets_dict["COCOval"]
        classes = COCO_CLASSES
    num_classes = cfg['num_classes']
    save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    if args.cuda and torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    net = model_builder(args.version, cfg, "test", int(size), num_classes,
                        args.channel_size)
    state_dict = torch.load(args.weights)
    # Strip the 'module.' prefix left by DataParallel checkpoints.
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    detector = Detect(num_classes, 0, cfg, use_arm=use_refine)
    img_wh = cfg["img_wh"]
    ValTransform = BaseTransform(img_wh, bgr_means, (2, 0, 1))
    input_folder = args.images
    for item in os.listdir(input_folder):
        img_path = os.path.join(input_folder, item)
        img = cv2.imread(img_path)
        dets = im_detect(img, net, detector, cfg, ValTransform, thresh)
        draw_img = draw_rects(img, dets, classes)
        out_img_name = "output_" + item
        save_path = os.path.join(save_folder, out_img_name)
        # Bug fix: the original saved the raw `img`, discarding the
        # annotated `draw_img` it had just computed.
        cv2.imwrite(save_path, draw_img)
def demo(img_id=0):
    """Visualize SSD300 detections on a demo image with matplotlib.

    Loads a pretrained VOC SSD300, runs it on 'demos/02.png' (the
    ``img_id``/testset path is currently commented out) and draws every
    detection with confidence >= 0.6.
    """
    net = build_ssd('test', 300, 21)  # initialize SSD
    print(net)
    net.load_weights(
        '/media/sunwl/Datum/Projects/GraduationProject/SSD_VHR_300/weights/ssd300_mAP_77.43_v2.pth'
    )
    testset = VOCDetection(VOCroot, [('2012', 'val')], None,
                           AnnotationTransform())
    # image = testset.pull_image(img_id)
    image = cv2.imread('demos/02.png')
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # View the sampled input image before transform
    plt.figure(figsize=(10, 10))
    plt.imshow(rgb_image)

    # Preprocess: resize to the 300x300 net input, subtract BGR channel
    # means, flip channels back to BGR order, then HWC -> CHW.
    x = cv2.resize(rgb_image, (300, 300)).astype(np.float32)
    x -= (104.0, 117.0, 123.0)
    x = x.astype(np.float32)
    x = x[:, :, ::-1].copy()
    x = torch.from_numpy(x).permute(2, 0, 1)

    xx = Variable(x.unsqueeze(0))  # wrap tensor in Variable
    if torch.cuda.is_available():
        xx = xx.cuda()
    y = net(xx)

    plt.figure(figsize=(10, 10))
    # One distinct color per VOC class (20 classes + background).
    colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
    plt.imshow(rgb_image.astype(np.uint8))  # plot the image for matplotlib
    currentAxis = plt.gca()

    detections = y.data

    # scale each detection back up to the image
    scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
    for i in range(detections.size(1)):
        j = 0
        # Detections are sorted by score; walk down until the confidence
        # drops below the 0.6 display threshold.
        while detections[0, i, j, 0] >= 0.6:
            score = detections[0, i, j, 0]
            # class index 0 is background, so labels are offset by one
            label_name = labels[i - 1]
            display_txt = '%s: %.2f' % (label_name, score)
            pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
            coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
            color = colors[i]
            currentAxis.add_patch(
                plt.Rectangle(*coords,
                              fill=False,
                              edgecolor=color,
                              linewidth=2))
            currentAxis.text(pt[0],
                             pt[1],
                             display_txt,
                             bbox={
                                 'facecolor': color,
                                 'alpha': 0.5
                             })
            j += 1
    plt.show()
Exemple #9
0
def train():
    """Train the module-level ``net``, saving periodic epoch checkpoints and
    printing per-iteration losses with an ETA estimate.

    Relies on module-level globals: net, args, optimizer, criterion, priors,
    cfg, device, and the various hyper-parameter names.
    """
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(training_dataset, preproc(img_dim, rgb_mean), AnnotationTransform())
    # dataset = AFLW(training_dataset, npy_file, preproc_(img_dim, rgb_mean))
    epoch_size = math.ceil(len(dataset) / batch_size)
    max_iter = max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator (fresh shuffle every epoch)
            batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate))
            if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > 200):
                torch.save(net.state_dict(), save_folder + '{}_'.format(args.save_name) + str(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.to(device)
        targets = [anno.to(device) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = cfg['loc_weight'] * loss_l + loss_c
        # loss_l, loss_c, loss_f = criterion(out, priors, targets)
        # loss = cfg['loc_weight'] * loss_l + loss_c + cfg['loc_five_weight'] * loss_f
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        batch_time = load_t1 - load_t0
        eta = int(batch_time * (max_iter - iteration))

        # Bug fix: the original format referenced loss_f.item(), but loss_f
        # is only produced by the commented-out five-point criterion above,
        # so the very first print raised a NameError. The F field is dropped.
        print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || L: {:.4f} C: {:.4f} || LR: {:.4f} || Batchtime: {:.4f} s || ETA: {}'.format(epoch, max_epoch, (iteration % epoch_size) + 1, epoch_size, iteration + 1, max_iter, loss_l.item(), loss_c.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))

    # NOTE(review): the final filename has no '.pth' extension and a trailing
    # '_' — looks unintended, but kept to avoid changing output paths.
    torch.save(net.state_dict(), save_folder + 'Final_{}_'.format(args.save_name))
Exemple #10
0
def train():
    """Train the module-level face-detection ``net``; saves a checkpoint at
    the start of every epoch and prints per-iteration losses.

    Relies on module-level globals: net, args, optimizer, criterion, priors,
    device, img_dim, rgb_means.
    """
    net.train()
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    dataset = VOCDetection(args.training_dataset, preproc(img_dim, rgb_means), AnnotationTransform())

    epoch_size = math.ceil(len(dataset) / args.batch_size)
    max_iter = args.max_epoch * epoch_size

    stepvalues = (200 * epoch_size, 250 * epoch_size)
    step_index = 0

    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator. Consistency fix: use args.batch_size as
            # epoch_size above does (the original referenced a bare
            # `batch_size` global here).
            batch_iterator = iter(data.DataLoader(dataset, args.batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate))
            # Save every epoch after the first. The original condition
            # `(epoch % 1 == 0 and epoch > 0) or (epoch % 1 == 0 and epoch > 200)`
            # reduces to exactly this.
            if epoch > 0:
                torch.save(net.state_dict(), args.save_folder + 'Face_epoch_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)
        images = images.to(device)
        targets = [anno.to(device) for anno in targets]

        # forward
        out = net(images)

        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        #loss = cfg['loc_weight'] * loss_l + loss_c
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()

        print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size) +
              '|| Totel iter ' + repr(iteration) + ' || L: %.4f C: %.4f||' % (loss_l.item(), loss_c.item()) +
              'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))

    torch.save(net.state_dict(), args.save_folder + 'Final.pth')
Exemple #11
0
def main():
    """Evaluate a fixed pretrained rgb-SSD300 checkpoint on the UCF24 test
    split and print per-class APs plus the mean AP.

    Side effects: appends the dataset name to args.save_root / args.data_root
    and sets args.listid. The commented-out lines are the per-iteration
    checkpoint loop this was adapted from.
    """
    means = (104, 117, 123)  # only support voc now

    exp_name = 'CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}'.format(
        args.dataset, args.input_type, args.batch_size, args.basenet[:-14],
        int(args.lr * 100000))

    args.save_root += args.dataset + '/'
    args.data_root += args.dataset + '/'
    args.listid = '01'  ## would be usefull in JHMDB-21
    print('Exp name', exp_name, args.listid)
    # for iteration in [int(itr) for itr in args.eval_iter.split(',')]:
    # log_file = open(args.save_root + 'cache/' + exp_name + "/testing-{:d}.log".format(iteration), "w", 1)
    # log_file.write(exp_name + '\n')
    # trained_model_path = args.save_root + 'cache/' + exp_name + '/ssd300_ucf24_' + repr(iteration) + '.pth'
    # hard-coded checkpoint path; 0.6357 is the mAP recorded for it
    trained_model_path = "/data-sdb/data/jiagang.zhu/realtime/ucf24/rgb-ssd300_ucf24_120000.pth"  ###0.6357
    # log_file.write(trained_model_path+'\n')
    num_classes = len(CLASSES) + 1  #7 +1 background
    net = build_ssd(300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(trained_model_path))
    net.eval()
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # print('Finished loading model %d !' % iteration)
    # Load dataset
    dataset = UCF24Detection(args.data_root,
                             'test',
                             BaseTransform(args.ssd_dim, means),
                             AnnotationTransform(),
                             input_type=args.input_type,
                             full_test=False)  ###full test = true 0.6357
    # evaluation
    torch.cuda.synchronize()
    tt0 = time.perf_counter()
    # log_file.write('Testing net \n')
    # placeholder iteration id passed through to test_net for naming
    iteration = 100000
    mAP, ap_all, ap_strs = test_net(net, args.save_root, exp_name,
                                    args.input_type, dataset, iteration,
                                    num_classes)
    for ap_str in ap_strs:
        print(ap_str)
        # log_file.write(ap_str + '\n')
    ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
    print(ptr_str)
    # log_file.write(ptr_str)

    torch.cuda.synchronize()
    print('Complete set time {:0.2f}'.format(time.perf_counter() - tt0))
Exemple #12
0
def main():
    """Evaluate a fixed pretrained rgb-SSD300 checkpoint on UCF24 with the
    IoU threshold given by ``args.iou_thresh``."""
    means = (104, 117, 123)  # only support voc now

    exp_name = 'CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}'.format(
        args.dataset, args.input_type, args.batch_size, args.basenet[:-14],
        int(args.lr * 100000))

    args.save_root += args.dataset + '/'
    args.data_root += args.dataset + '/'
    args.listid = '099'  ## would be usefull in JHMDB-21
    print('Exp name', exp_name, args.listid)
    iteration = 0

    trained_model_path = "/data4/lilin/my_code/realtime/ucf24/rgb-ssd300_ucf24_120000.pth"

    # +1 for the background class
    num_classes = len(CLASSES) + 1
    net = build_ssd(300, num_classes)  # initialize SSD
    net.load_state_dict(torch.load(trained_model_path))
    net.eval()
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    print('Finished loading model {} !'.format(iteration))

    # Load dataset
    dataset = UCF24Detection(args.data_root,
                             'test',
                             BaseTransform(args.ssd_dim, means),
                             AnnotationTransform(),
                             input_type=args.input_type,
                             full_test=True)

    # evaluation
    torch.cuda.synchronize()
    eval_start = time.perf_counter()

    print('\niou_thresh:::=>{}\n'.format(args.iou_thresh))

    mAP, ap_all, ap_strs = te_net(net, args.save_root, exp_name,
                                  args.input_type, dataset, iteration,
                                  num_classes, args.iou_thresh)
    for per_class_line in ap_strs:
        print(per_class_line)

    print('\nMEANAP:::=>{}\n'.format(mAP))

    torch.cuda.synchronize()
    print('Complete set time {:0.2f}'.format(time.perf_counter() - eval_start))
Exemple #13
0
def DatasetSync(dataset='voc', split='training'):
    """Build the training dataset for 'voc' or 'kitti'.

    Args:
        dataset: 'voc' or 'kitti'.
        split: split name; only used by the kitti loader.

    Returns:
        The constructed dataset object.

    Raises:
        ValueError: for unknown dataset names (previously the input string
            itself was silently returned).
    """
    if dataset == 'voc':
        # Bug fix: train_sets was never defined here (NameError). Use the
        # same default trainval split as the other DatasetSync in this file.
        train_sets = [('2007', 'trainval'), ('2012', 'trainval')]
        DataRoot = os.path.join(args.data_root, 'VOCdevkit')
        dataset = VOCDetection(DataRoot, train_sets,
                               SSDAugmentation(args.dim, means),
                               AnnotationTransform())
    elif dataset == 'kitti':
        DataRoot = os.path.join(args.data_root, 'kitti')
        dataset = KittiLoader(DataRoot,
                              split=split,
                              img_size=(1000, 300),
                              transforms=SSDAugmentation((1000, 300), means),
                              target_transform=AnnotationTransform_kitti())
    else:
        raise ValueError('Unknown dataset: {}'.format(dataset))
    return dataset
def action_detection_images(num_classes, means_bgr, li_color_class):
    """Evaluate saved action-detection checkpoints (one per iteration in
    args.eval_iter) on UCF24 and log per-class APs and the mean AP.

    Args:
        num_classes: number of classes including background.
        means_bgr: per-channel BGR means for the input transform.
        li_color_class: per-class colors forwarded to test_net for drawing.
    """
    exp_name = 'CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}'.format(
        args.dataset, args.input_type, args.batch_size, args.basenet[:-14],
        int(args.lr * 100000))
    print('Exp name', exp_name, args.listid)
    for iteration in [int(itr) for itr in args.eval_iter.split(',')]:
        # line-buffered (buffering=1) so log lines appear as they are written
        log_file = open(
            args.save_root + 'cache/' + exp_name +
            "/testing-{:d}.log".format(iteration), "w", 1)
        log_file.write(exp_name + '\n')
        #trained_model_path = args.save_root + 'cache/' + exp_name + '/ssd300_ucf24_' + repr(iteration) + '.pth'
        trained_model_path = args.save_root + 'cache/' + exp_name + '/' + args.input_type + '-ssd300_ucf24_' + repr(
            iteration) + '.pth'
        log_file.write(trained_model_path + '\n')
        net = init_ssd(num_classes, trained_model_path, args.cuda)
        print('Finished loading model %d !' % iteration)
        # Load dataset
        dataset = UCF24Detection(args.data_root,
                                 'test',
                                 BaseTransform(args.ssd_dim, means_bgr),
                                 AnnotationTransform(),
                                 input_type=args.input_type,
                                 full_test=True)
        #print('dataset.CLASSES : ', dataset.CLASSES);   exit()
        # evaluation
        torch.cuda.synchronize()
        tt0 = time.perf_counter()
        log_file.write('Testing net \n')
        #mAP, ap_all, ap_strs = test_net(net, args.save_root, exp_name, args.input_type, dataset, iteration, num_classes)
        mAP, ap_all, ap_strs = test_net(net, args.save_root, exp_name,
                                        args.input_type, dataset, iteration,
                                        li_color_class, means_bgr,
                                        args.n_record, args.iou_thresh)
        for ap_str in ap_strs:
            log_file.write(ap_str + '\n')
        ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
        print(ptr_str)
        log_file.write(ptr_str)

        torch.cuda.synchronize()
        print('Complete set time {:0.2f}'.format(time.perf_counter() - tt0))
        log_file.close()
    return
Exemple #15
0
def train():
    """Legacy SSD training loop over VOC; saves a checkpoint every 5000
    iterations and a final model at the end.

    Relies on module-level globals: net, args, optimizer, criterion,
    max_iter, stepvalues, ssd_dim, rgb_means, batch_size.
    """
    net.train()
    train_loss = 0
    print('Loading Dataset...')
    dataset = VOCDetection(VOCroot, 'train',
                           base_transform(ssd_dim, rgb_means),
                           AnnotationTransform())
    epoch_size = len(dataset) // args.batch_size
    print('Training SSD on', dataset.name)
    step_index = 0
    for iteration in range(max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator (fresh shuffle every epoch)
            batch_iterator = iter(data.DataLoader(dataset,batch_size, \
                                  shuffle=True,collate_fn=detection_collate))
        # Bug fix: the LR-step check was nested inside the epoch-boundary
        # branch above, so any stepvalue not landing exactly on an epoch
        # boundary was silently skipped. Check it every iteration, as the
        # other training loops in this file do.
        if iteration in stepvalues:
            step_index += 1
            adjust_learning_rate(optimizer, args.gamma, step_index)

        # load train data
        images, targets = next(batch_iterator)
        images = Variable(images.cuda())
        targets = [Variable(anno.cuda()) for anno in targets]
        #forward
        t0 = time.time()
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss = criterion(out, targets)
        loss.backward()
        optimizer.step()
        t1 = time.time()
        # NOTE(review): loss.data[0] is pre-0.4 PyTorch style; kept because
        # this snippet targets that API (modern code would use loss.item()).
        train_loss += loss.data[0]
        if iteration % 10 == 0:
            print('Timer: ', t1 - t0)
            print('Loss: %f' % (loss.data[0]), end=' ')
        if iteration % 5000 == 0:
            torch.save(net.state_dict(),
                       'weights/ssd_iter_new' + repr(iteration) + '.pth')
    torch.save(net, args.save_folder + '' + args.version + '.pth')
def train(args, net, optimizer, criterion, scheduler):
    """Train an omnidirectional SSD on UCF24 or JHMDB, logging losses and
    periodically checkpointing + validating.

    Args:
        args: parsed options (data roots, batch size, print/eval steps, ...).
        net: the model to train (already wrapped/moved as the caller wants).
        optimizer: optimizer stepped once per batch.
        criterion: returns (localization_loss, classification_loss).
        scheduler: LR scheduler stepped once per batch.

    Side effects: writes training.log and ssd300_ucf24_<iter>.pth files
    under args.save_root.
    """
    log_file = open(args.save_root + "training.log", "w", 1)
    log_file.write(args.exp_name + '\n')
    for arg in vars(args):
        print(arg, getattr(args, arg))
        log_file.write(str(arg) + ': ' + str(getattr(args, arg)) + '\n')
    log_file.write(str(net))
    net.train()

    # loss counters
    batch_time = AverageMeter()
    losses = AverageMeter()
    loc_losses = AverageMeter()
    cls_losses = AverageMeter()

    print('Loading Dataset...')
    if args.dataset == 'ucf24':
        train_dataset = OmniUCF24(args.data_root,
                                  args.train_sets,
                                  SSDAugmentation(300, args.means),
                                  AnnotationTransform(),
                                  input_type=args.input_type,
                                  outshape=args.outshape)
        val_dataset = OmniUCF24(args.data_root,
                                'test',
                                BaseTransform(300, args.means),
                                AnnotationTransform(),
                                input_type=args.input_type,
                                outshape=args.outshape)
    else:
        train_dataset = OmniJHMDB(args.data_root,
                                  args.train_sets,
                                  SSDAugmentation(300, None),
                                  AnnotationTransform(),
                                  outshape=args.outshape)
        val_dataset = OmniJHMDB(args.data_root,
                                'test',
                                BaseTransform(300, None),
                                AnnotationTransform(),
                                outshape=args.outshape)

    print('Training SSD on', train_dataset.name)

    batch_iterator = None
    train_data_loader = data.DataLoader(train_dataset,
                                        args.batch_size,
                                        num_workers=args.num_workers,
                                        shuffle=True,
                                        collate_fn=detection_collate,
                                        pin_memory=True)
    val_data_loader = data.DataLoader(val_dataset,
                                      2,
                                      num_workers=1,
                                      shuffle=False,
                                      collate_fn=detection_collate,
                                      pin_memory=True)
    itr_count = 0
    args.max_iter = 150000
    epoch_count = 0
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    iteration = 0
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    while iteration <= args.max_iter:
        for i, (images, targets, _) in enumerate(train_data_loader):

            if iteration > args.max_iter:
                break
            iteration += 1

            if args.cuda:
                images = images.cuda(0, non_blocking=True)
                targets = [anno.cuda(0, non_blocking=True) for anno in targets]

            # forward
            out = net(images)
            # backprop
            optimizer.zero_grad()

            loss_l, loss_c = criterion(out, targets)
            loss = loss_l + loss_c

            loss.backward()
            optimizer.step()
            scheduler.step()
            loc_loss = loss_l.item()
            conf_loss = loss_c.item()
            loc_losses.update(loc_loss)
            cls_losses.update(conf_loss)
            losses.update(loss.item() / 2.0)

            if iteration % args.print_step == 0 and iteration > 0:
                torch.cuda.synchronize()
                t1 = time.perf_counter()
                batch_time.update(t1 - t0)

                print_line = 'E{:02d} Iter {:06d}/{:06d} loc-loss {:.3f}({:.3f}) cls-loss {:.3f}({:.3f}) ' \
                         'avg-loss {:.3f}({:.3f}) Timer {:0.3f}({:0.3f})'.format(epoch_count,
                          iteration, args.max_iter, loc_losses.val, loc_losses.avg, cls_losses.val,
                          cls_losses.avg, losses.val, losses.avg, batch_time.val, batch_time.avg)

                torch.cuda.synchronize()
                t0 = time.perf_counter()
                log_file.write(print_line + '\n')
                print(print_line)

                itr_count += 1

                if itr_count % args.loss_reset_step == 0 and itr_count > 0:
                    loc_losses.reset()
                    cls_losses.reset()
                    losses.reset()
                    batch_time.reset()
                    print('Reset accumulators of ', args.exp_name, ' at',
                          itr_count * args.print_step)
                    itr_count = 0

            if (iteration % args.eval_step == 0) and iteration > 0:
                torch.cuda.synchronize()
                tvs = time.perf_counter()
                print('Saving state, iter:', iteration)
                torch.save(
                    net.state_dict(), args.save_root + 'ssd300_ucf24_' +
                    repr(iteration) + '.pth')

                net.eval()  # switch net to evaluation mode
                mAP, ap_all, ap_strs = validate(args,
                                                net,
                                                val_data_loader,
                                                val_dataset,
                                                iteration,
                                                iou_thresh=args.iou_thresh)

                for ap_str in ap_strs:
                    print(ap_str)
                    log_file.write(ap_str + '\n')
                ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
                print(ptr_str)
                log_file.write(ptr_str)

                net.train()  # Switch net back to training mode
                torch.cuda.synchronize()
                t0 = time.perf_counter()
                prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0 - tvs)
                print(prt_str)
                # Bug fix: the original wrote ptr_str (the stale MEANAP line)
                # here instead of the validation-time line it just printed.
                log_file.write(prt_str)
        epoch_count += 1
    log_file.close()
Exemple #17
0
    #commented
    #print('Evaluating detections')
    return ( testset.evaluate_detections(all_boxes, save_folder),total_detect_time,total_nms_time,4951/(total_nms_time+total_detect_time),4951/(total_detect_time) )



if __name__ == '__main__':
    # load net
    #torch.cuda.set_device(args.device)
    # pick input size / class count from CLI flags (tuple-index-by-bool idiom)
    img_dim = (300,512)[args.size=='512']
    num_classes = (21, 81)[args.dataset == 'COCO']
    net = build_ssd('test', img_dim, num_classes)    # initialize detector

    if args.dataset == 'VOC':
        testset = VOCDetection(
            VOCroot, [('2007', 'test')], None, AnnotationTransform())
    elif args.dataset == 'COCO':
        testset = COCODetection(
            COCOroot, [('2014', 'minival')], None)
            #COCOroot, [('2015', 'test-dev')], None)
    else:
        print('Only VOC and COCO dataset are supported now!')

    top_k = 200
    detector = Detect(num_classes,0,cfg)
    save_folder = os.path.join(args.save_folder,args.dataset)
    # RFB_mobile uses ImageNet-style means; other versions use VOC means
    rgb_means = ((104, 117, 123),(103.94,116.78,123.68))[args.version == 'RFB_mobile']

    # NOTE(review): the iteration loop that would consume these range values
    # appears to have been truncated from this snippet.
    start_iter = 100000
    end_iter = 154000
    step = 2000
Exemple #18
0
# Jupyter/IPython demo cell: load a pretrained SSD300 and preview one VOC
# image plus its network-ready preprocessing. NOTE(review): `%matplotlib
# inline` is an IPython magic, so this block only runs inside a notebook,
# not as a plain Python script.
import numpy as np
import cv2
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

from ssd import build_ssd
# from models import build_ssd as build_ssd_v1 # uncomment for older pool6 model

net = build_ssd('test', 300, 21)    # initialize SSD
net.load_weights('../weights/ssd300_mAP_77.43_v2.pth')
# image = cv2.imread('./data/example.jpg', cv2.IMREAD_COLOR)  # uncomment if dataset not downloaded
%matplotlib inline
from matplotlib import pyplot as plt
from data import VOCDetection, VOCroot, AnnotationTransform
# here we specify year (07 or 12) and dataset ('test', 'val', 'train') 
testset = VOCDetection(VOCroot, [('2007', 'val')], None, AnnotationTransform())
img_id = 60
image = testset.pull_image(img_id)  # raw BGR image as loaded by OpenCV
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# View the sampled input image before transform
plt.figure(figsize=(10,10))
plt.imshow(rgb_image)
plt.show()

# Network preprocessing: resize to 300x300, subtract BGR channel means,
# flip BGR->RGB, then HWC->CHW for the tensor input.
x = cv2.resize(image, (300, 300)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
plt.imshow(x)  # mean-subtracted floats: display is intentionally distorted
x = torch.from_numpy(x).permute(2, 0, 1)
def main():
    """Entry point for UCF-24 tube-SSD training.

    Parses CLI arguments, builds the SSD network, loads/finetunes/resumes
    weights, sets per-parameter learning rates, then runs an epoch loop of
    train / checkpoint / periodic validation.

    Uses module-level globals ``args``, ``log_file`` and ``best_prec1``
    (``best_prec1`` is assumed to be initialized elsewhere — TODO confirm).
    """
    global args, log_file, best_prec1
    relative_path = '/data4/lilin/my_code'
    parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')
    parser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer')
    parser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model')
    parser.add_argument('--dataset', default='ucf24', help='pretrained base model')
    parser.add_argument('--ssd_dim', default=300, type=int, help='Input Size for SSD')  # only support 300 now
    parser.add_argument('--modality', default='rgb', type=str,
                        help='INput tyep default rgb options are [rgb,brox,fastOF]')
    parser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')
    parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
    parser.add_argument('--num_workers', default=0, type=int, help='Number of workers used in dataloading')
    parser.add_argument('--max_iter', default=120000, type=int, help='Number of training iterations')
    parser.add_argument('--man_seed', default=123, type=int, help='manualseed for reproduction')
    parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
    parser.add_argument('--ngpu', default=1, type=str2bool, help='Use cuda to train model')
    parser.add_argument('--base_lr', default=0.0005, type=float, help='initial learning rate')
    parser.add_argument('--lr', default=0.0005, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')
    parser.add_argument('--gamma', default=0.2, type=float, help='Gamma update for SGD')
    parser.add_argument('--log_iters', default=True, type=bool, help='Print the loss at each iteration')
    parser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')
    parser.add_argument('--data_root', default= relative_path + '/realtime/', help='Location of VOC root directory')
    parser.add_argument('--save_root', default= relative_path + '/realtime/saveucf24/',
                        help='Location to save checkpoint models')
    parser.add_argument('--iou_thresh', default=0.5, type=float, help='Evaluation threshold')
    parser.add_argument('--conf_thresh', default=0.01, type=float, help='Confidence threshold for evaluation')
    parser.add_argument('--nms_thresh', default=0.45, type=float, help='NMS threshold')
    parser.add_argument('--topk', default=50, type=int, help='topk for evaluation')
    parser.add_argument('--clip_gradient', default=40, type=float, help='gradients clip')
    parser.add_argument('--resume', default=None,type=str, help='Resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--epochs', default=35, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--eval_freq', default=2, type=int, metavar='N', help='evaluation frequency (default: 5)')
    parser.add_argument('--snapshot_pref', type=str, default="ucf101_vgg16_ssd300_end2end")
    parser.add_argument('--lr_milestones', default=[-2, -5], type=float, help='initial learning rate')
    parser.add_argument('--arch', type=str, default="VGG16")
    # NOTE(review): parsed with type=str, so any value supplied on the CLI is
    # a string — the `is True` / `is False` identity checks below only match
    # the Python bool default or a value assigned in code. TODO confirm intent.
    parser.add_argument('--Finetune_SSD', default=False, type=str)
    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument(
        '--step',
        type=int,
        default=[18, 27],
        nargs='+',
        help='the epoch where optimizer reduce the learning rate')
    parser.add_argument('--log_lr', default=False, type=str2bool, help='Use cuda to train model')
    parser.add_argument(
        '--print-log',
        type=str2bool,
        default=True,
        help='print logging or not')
    parser.add_argument(
        '--end2end',
        type=str2bool,
        default=False,
        help='print logging or not')

    ## Parse arguments
    args = parser.parse_args()

    print(__file__)

    print_log(args, this_file_name)
    ## set random seeds
    np.random.seed(args.man_seed)
    torch.manual_seed(args.man_seed)
    if args.cuda:
        torch.cuda.manual_seed_all(args.man_seed)

    if args.cuda and torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    # Derived/fixed training configuration stored on args for downstream use.
    args.cfg = v2
    args.train_sets = 'train'
    args.means = (104, 117, 123)
    num_classes = len(CLASSES) + 1  # +1 for the background class
    args.num_classes = num_classes
    # args.step = [int(val) for val in args.step.split(',')]
    args.loss_reset_step = 30
    args.eval_step = 10000
    args.print_step = 10
    args.data_root += args.dataset + '/'

    ## Define the experiment Name will used to same directory
    # basenet[:-14] strips the '_reducedfc.pth' suffix (e.g. leaves 'vgg16').
    args.snapshot_pref = ('ucf101_CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}').format(args.dataset,
                args.modality, args.batch_size, args.basenet[:-14], int(args.lr*100000)) # + '_' + file_name + '_' + day
    print_log(args, args.snapshot_pref)

    if not os.path.isdir(args.save_root):
        os.makedirs(args.save_root)

    net = build_ssd(300, args.num_classes)

    # --- Weight initialisation: finetune snapshot / resume / flow weights / VGG base ---
    if args.Finetune_SSD is True:
        print_log(args, "load snapshot")
        pretrained_weights = "/home2/lin_li/zjg_code/realtime/ucf24/rgb-ssd300_ucf24_120000.pth"
        pretrained_dict = torch.load(pretrained_weights)
        model_dict = net.state_dict()  # 1. filter out unnecessary keys
        pretrained_dict_2 = {k: v for k, v in pretrained_dict.items() if k in model_dict } # 2. overwrite entries in the existing state dict
        # pretrained_dict_2['vgg.25.bias'] = pretrained_dict['vgg.24.bias']
        # pretrained_dict_2['vgg.25.weight'] = pretrained_dict['vgg.24.weight']
        # pretrained_dict_2['vgg.27.bias'] = pretrained_dict['vgg.26.bias']
        # pretrained_dict_2['vgg.27.weight'] = pretrained_dict['vgg.26.weight']
        # pretrained_dict_2['vgg.29.bias'] = pretrained_dict['vgg.28.bias']
        # pretrained_dict_2['vgg.29.weight'] = pretrained_dict['vgg.28.weight']
        # pretrained_dict_2['vgg.32.bias'] = pretrained_dict['vgg.31.bias']
        # pretrained_dict_2['vgg.32.weight'] = pretrained_dict['vgg.31.weight']
        # pretrained_dict_2['vgg.34.bias'] = pretrained_dict['vgg.33.bias']
        # pretrained_dict_2['vgg.34.weight'] = pretrained_dict['vgg.33.weight']
        # NOTE(review): model_dict is updated but never applied with
        # net.load_state_dict(model_dict) — the filtered pretrained weights
        # are never actually loaded into the network. TODO confirm.
        model_dict.update(pretrained_dict_2) # 3. load the new state dict
    elif args.resume is not None:
        if os.path.isfile(args.resume):
            print_log(args, ("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            if args.end2end is False:
                args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            net.load_state_dict(checkpoint['state_dict'])
            print_log(args, ("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.evaluate, checkpoint['epoch'])))
        else:
            print_log(args, ("=> no checkpoint found at '{}'".format(args.resume)))

    elif args.modality == 'fastOF':
        print_log(args, 'Download pretrained brox flow trained model weights and place them at:::=> ' + args.data_root + 'ucf24/train_data/brox_wieghts.pth')
        pretrained_weights = args.data_root + 'train_data/brox_wieghts.pth'
        print_log(args, 'Loading base network...')
        net.load_state_dict(torch.load(pretrained_weights))
    else:
        vgg_weights = torch.load(args.data_root +'train_data/' + args.basenet)
        print_log(args, 'Loading base network...')
        net.vgg.load_state_dict(vgg_weights)

    if args.cuda:
        net = net.cuda()

    def xavier(param):
        # NOTE: init.xavier_uniform is deprecated in newer PyTorch
        # (renamed to xavier_uniform_).
        init.xavier_uniform(param)

    def weights_init(m):
        # Xavier-init conv weights, zero conv biases.
        if isinstance(m, nn.Conv2d):
            xavier(m.weight.data)
            m.bias.data.zero_()

    print_log(args, 'Initializing weights for extra layers and HEADs...')
    # initialize newly added layers' weights with xavier method
    if args.Finetune_SSD is False and args.resume is None:
        print_log(args, "init layers")
        net.clstm.apply(weights_init)
        net.extras.apply(weights_init)
        net.loc.apply(weights_init)
        net.conf.apply(weights_init)

    parameter_dict = dict(net.named_parameters()) # Get parmeter of network in dictionary format wtih name being key
    params = []

    #Set different learning rate to bias layers and set their weight_decay to 0
    for name, param in parameter_dict.items():
        # if args.end2end is False and name.find('vgg') > -1 and int(name.split('.')[1]) < 23:# :and name.find('cell') <= -1
        #     param.requires_grad = False
        #     print_log(args, name + 'layer parameters will be fixed')
        # else:
        if name.find('bias') > -1:
            print_log(args, name + 'layer parameters will be trained @ {}'.format(args.lr*2))
            params += [{'params': [param], 'lr': args.lr*2, 'weight_decay': 0}]
        else:
            print_log(args, name + 'layer parameters will be trained @ {}'.format(args.lr))
            params += [{'params':[param], 'lr': args.lr, 'weight_decay':args.weight_decay}]

    optimizer = optim.SGD(params, lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(args.num_classes, 0.5, True, 0, True, 3, 0.5, False, args.cuda)

    scheduler = None
    # scheduler = MultiStepLR(optimizer, milestones=args.step, gamma=args.gamma)
    rootpath = args.data_root
    split = 1
    splitfile = rootpath + 'splitfiles/trainlist{:02d}.txt'.format(split)
    trainvideos = readsplitfile(splitfile)

    splitfile = rootpath + 'splitfiles/testlist{:02d}.txt'.format(split)
    testvideos = readsplitfile(splitfile)


    print_log(args, 'Loading Dataset...')
    # train_dataset = UCF24Detection(args.data_root, args.train_sets, SSDAugmentation(args.ssd_dim, args.means),
    #                                AnnotationTransform(), input_type=args.modality)
    # val_dataset = UCF24Detection(args.data_root, 'test', BaseTransform(args.ssd_dim, args.means),
    #                              AnnotationTransform(), input_type=args.modality,
    #                              full_test=False)

    # train_data_loader = data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers,
    #                               shuffle=False, collate_fn=detection_collate, pin_memory=True)
    # val_data_loader = data.DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers,
    #                              shuffle=False, collate_fn=detection_collate, pin_memory=True)

    # Validation set is built once; the video list is shuffled first.
    len_test = len(testvideos)
    random.shuffle(testvideos)
    testvideos_temp = testvideos
    val_dataset = UCF24Detection(args.data_root, 'test', BaseTransform(args.ssd_dim, args.means),
                                 AnnotationTransform(), input_type=args.modality,
                                 full_test=False,
                                 videos=testvideos_temp,
                                 istrain=False)
    val_data_loader = data.DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers,
                                           shuffle=False, collate_fn=detection_collate, pin_memory=True,
                                           drop_last=True)


    # print_log(args, "train epoch_size: " + str(len(train_data_loader)))
    # print_log(args, 'Training SSD on' + train_dataset.name)

    # Dump the full configuration to console and log.
    print_log(args, args.snapshot_pref)
    for arg in vars(args):
        print(arg, getattr(args, arg))
        print_log(args, str(arg)+': '+str(getattr(args, arg)))

    print_log(args, str(net))
    len_train = len(trainvideos)
    torch.cuda.synchronize()
    # --- Epoch loop: rebuild a (reshuffled) train dataset every epoch ---
    for epoch in range(args.start_epoch, args.epochs):

        random.shuffle(trainvideos)
        trainvideos_temp = trainvideos
        train_dataset = UCF24Detection(args.data_root, 'train', SSDAugmentation(args.ssd_dim, args.means),
                                       AnnotationTransform(),
                                       input_type=args.modality,
                                       videos=trainvideos_temp,
                                       istrain=True)
        train_data_loader = data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers,
                                                 shuffle=False, collate_fn=detection_collate, pin_memory=True, drop_last=True)

        train(train_data_loader, net, criterion, optimizer, epoch, scheduler)
        print_log(args, 'Saving state, epoch:' + str(epoch))

        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': net.state_dict(),
            'best_prec1': best_prec1,
        }, epoch = epoch)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            torch.cuda.synchronize()
            tvs = time.perf_counter()
            mAP, ap_all, ap_strs = validate(args, net, val_data_loader, val_dataset, epoch, iou_thresh=args.iou_thresh)
            # remember best prec@1 and save checkpoint
            is_best = mAP > best_prec1
            best_prec1 = max(mAP, best_prec1)
            print_log(args, 'Saving state, epoch:' +str(epoch))
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': net.state_dict(),
                'best_prec1': best_prec1,
            }, is_best,epoch)

            for ap_str in ap_strs:
                print(ap_str)
                print_log(args, ap_str)
            ptr_str = '\nMEANAP:::=>'+str(mAP)
            print(ptr_str)
            # log_file.write()
            print_log(args, ptr_str)

            torch.cuda.synchronize()
            t0 = time.perf_counter()
            prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0-tvs)
            print(prt_str)
            # log_file.write(ptr_str)
            # NOTE(review): logs ptr_str (the mAP line) instead of prt_str
            # (the timing line) — presumably a typo carried over. TODO confirm.
            print_log(args, ptr_str)
Exemple #20
0
def train():
    """Run the SSD training loop on VOC for `args.max_epoch` epochs.

    Relies on module-level globals: `net`, `optimizer`, `criterion`,
    `priors`, `writer`, `args`, `batch_size`, plus the dataset/augmentation
    helpers imported at file scope.
    """
    net.train()
    # Per-epoch loss accumulators.
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    print('Loading Dataset...')

    dataset = VOCDetection(VOCroot, train_sets, preproc(img_dim, rgb_means, p), AnnotationTransform())

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # Learning-rate decay boundaries, expressed in iterations.
    stepvalues = tuple(milestone * epoch_size for milestone in (80, 95, 105))
    step_index = 0
    start_iter = 0

    lr = args.lr
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # Periodic checkpointing: every 10 epochs past 10, every 2 past 105.
            due = (epoch > 10 and epoch % 10 == 0) or (epoch > 105 and epoch % 2 == 0)
            if due:
                ckpt = args.save_folder + 'epoches_' + str(epoch).zfill(3) + '.pth'
                torch.save(net.state_dict(), ckpt)
            # Fresh shuffled iterator for the new epoch.
            loader = data.DataLoader(dataset, batch_size, shuffle=True,
                                     num_workers=8, collate_fn=detection_collate)
            batch_iterator = iter(loader)
            loc_loss = 0
            conf_loss = 0
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, 0.2, epoch, step_index, iteration, epoch_size)

        images, targets = next(batch_iterator)
        images = Variable(images.cuda())
        targets = [Variable(anno.cuda()) for anno in targets]

        # Forward / backward / parameter update.
        t0 = time.time()
        out = net(images)
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()

        # Scalar curves for the dashboard writer.
        visualize_total_loss(writer, loss.item(), iteration)
        visualize_loc_loss(writer, loss_l.item(), iteration)
        visualize_conf_loss(writer, loss_c.item(), iteration)

        if iteration % 10 == 0:
            print(f'Epoch:{epoch!r} || epochiter: {iteration % epoch_size!r}/{epoch_size!r}'
                  f'|| Totel iter {iteration!r} || L: {loss_l.item():.4f} C: {loss_c.item():.4f}||'
                  f'Batch time: {load_t1 - load_t0:.4f} sec. ||LR: {lr:.8f}')

    torch.save(net.state_dict(), args.save_folder + 'epoches_' + str(epoch).zfill(3) + '.pth')
Exemple #21
0
def train():
    """Epoch-style SSD training loop for VOC or COCO.

    Relies on module-level globals: ``net``, ``optimizer``, ``criterion``,
    ``priors``, ``args``, ``batch_size``, ``train_sets``, ``img_dim``,
    ``rgb_means``, ``p`` and the dataset classes. Supports resuming from
    epoch ``args.resume_epoch`` and tracks a per-epoch average loss history.
    """
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')

    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets,
                               preproc(img_dim, rgb_means, p),
                               AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets,
                                preproc(img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones differ per dataset; (a, b)[cond] selects COCO's.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (100 * epoch_size, 135 * epoch_size, 170 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', dataset.name)
    step_index = 0

    # When resuming, fast-forward step_index past milestones already crossed.
    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
        for sv in stepvalues:
            if start_iter > sv:
                step_index += 1
                continue
            else:
                break
    else:
        start_iter = 0

    lr = args.lr
    avg_loss_list = []
    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(
                data.DataLoader(dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=args.num_workers,
                                collate_fn=detection_collate))
            # Average loss of the epoch just finished; note that on the very
            # first boundary the counters are still 0, so 0.0 is appended.
            avg_loss = (loc_loss + conf_loss) / epoch_size
            avg_loss_list.append(avg_loss)
            print("avg_loss_list:")
            if len(avg_loss_list) <= 5:
                print(avg_loss_list)
            else:
                print(avg_loss_list[-5:])  # only the 5 most recent epochs
            loc_loss = 0
            conf_loss = 0
            if (epoch % 10 == 0):
                torch.save(
                    net.state_dict(), args.save_folder + args.version + '_' +
                    args.dataset + '_epoches_' + repr(epoch) + '.pth')
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        images, targets = next(batch_iterator)

        if args.cuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda()) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno) for anno in targets]
        # Forward / backward / update.
        out = net(images)
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, priors, targets)
        loss = loss_l + loss_c
        loss.backward()
        # if epoch > args.warm_epoch:
        #     updateBN()
        optimizer.step()
        t1 = time.time()
        loc_loss += loss_l.item()
        conf_loss += loss_c.item()
        load_t1 = time.time()
        if iteration % 10 == 0:
            print(
                'Epoch:' + repr(epoch) + ' || epochiter: ' +
                repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                '|| Totel iter ' + repr(iteration) +
                ' || L: %.4f C: %.4f S: %.4f||' %
                (loss_l.item(), loss_c.item(), loss_l.item() + loss_c.item()) +
                'Batch time: %.4f ||' % (load_t1 - load_t0) + 'LR: %.7f' %
                (lr))

    # Final snapshot after the last iteration.
    torch.save(
        net.state_dict(), args.save_folder + 'Final_' + args.version + '_' +
        args.dataset + '.pth')
Exemple #22
0
def train():
    """Two-stage (refine) SSD training loop for VOC or COCO.

    Stage 0 predicts coarse boxes against the fixed ``priors``; its
    localization output is decoded into refined anchors (``r_priors``) that
    stage 1 regresses against. With ``C_agnostic`` set, stage 0 trains
    class-agnostically (all labels collapsed to foreground/background).

    NOTE(review): uses the legacy pre-0.4 PyTorch API
    (``Variable(..., volatile=True)`` and ``loss.data[0]``) — this code only
    runs on old torch versions; confirm before porting.

    Relies on module-level globals: ``net``, ``optimizer``, ``criterion``
    (a pair of loss modules), ``priors``, ``cfg``, ``args``, ``batch_size``,
    ``C_agnostic`` and ``f_writer``.
    """
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0 + args.resume_epoch
    print('Loading Dataset...')
    f_writer.write('Loading Dataset...\n')

    if args.dataset == 'VOC':
        dataset = VOCDetection(VOCroot, train_sets, preproc(
            img_dim, rgb_means, p), AnnotationTransform())
    elif args.dataset == 'COCO':
        dataset = COCODetection(COCOroot, train_sets, preproc(
            img_dim, rgb_means, p))
    else:
        print('Only VOC and COCO are supported now!')
        return

    epoch_size = len(dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR decay milestones per dataset, in iterations.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC,stepvalues_COCO)[args.dataset=='COCO']
    print('Training',args.version, 'on', dataset.name)
    f_writer.write('Training'+args.version+ 'on'+ dataset.name+ '\n')
    step_index = 0

    # NOTE(review): unlike the sibling loop above, resuming here does NOT
    # fast-forward step_index past already-crossed milestones.
    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    lr = args.lr

    # Index 0 = first (coarse) stage, index 1 = refine stage.
    loss = [None] * 2
    loss_l = [None] * 2
    loss_c = [None] * 2

    for iteration in range(start_iter, max_iter):
        if iteration % epoch_size == 0:
            # create batch iterator
            batch_iterator = iter(data.DataLoader(dataset, batch_size,
                                                  shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            if (epoch % 40 == 0 and epoch > 0) or (epoch % 10 ==0 and epoch > 200):
                torch.save(net.state_dict(), args.save_folder+args.version+'_'+args.dataset + '_epoches_'+
                           repr(epoch) + '_refine_agnostic_{}.pth.{}'.format(C_agnostic, args.extra))
            epoch += 1

        load_t0 = time.time()
        if iteration in stepvalues:
            step_index += 1
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)


        # load train data
        # targets[1] = full-class annotations; targets[0] = stage-0 targets.
        targets = [None] * 2
        images, targets[1] = next(batch_iterator)

        targets[0] = [None] * len(targets[1])
        if C_agnostic:
            # Collapse class labels (column 4) to binary foreground (>=1).
            for i in range(len(targets[1])):
                targets[0][i] = targets[1][i].clone()
                targets[0][i][:,4] = targets[0][i][:,4].ge(1)
        else:
            targets[0] = targets[1]

        #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))

        if args.cuda:
            images = Variable(images.cuda())
            targets[0] = [Variable(anno.cuda(),volatile=True) for anno in targets[0]]
            targets[1] = [Variable(anno.cuda(),volatile=True) for anno in targets[1]]
        else:
            images = Variable(images)
            targets[0] = [Variable(anno, volatile=True) for anno in targets[0]]
            targets[1] = [Variable(anno, volatile=True) for anno in targets[1]]
        # forward
        t0 = time.time()
        out = net(images)

        ### calculation refined anchors
        # loc_data = Variable(out[0][0].data.clone(), volatile=True)
        # Decode stage-0 box regressions into absolute refined anchors.
        loc_data = out[0][0].data.clone()
        conf_data = Variable(out[0][1].data.clone(), volatile=True)
        ## decode and clamp
        r_priors = decode(loc_data, priors.data, cfg['variance'])
        # Optionally let gradients flow back through the refined anchors.
        if args.bp_anchors:
            r_priors = Variable(r_priors, requires_grad=True)
        else:
            r_priors = Variable(r_priors, volatile=True)

        # for i in range(loc_data.size(0)):
        #     z = box_utils.decode(loc_data.data[i,:,:], priors.data, cfg['variance'])
        #     # loc_data[i,:,:].clamp_(0,1)

        # backprop
        optimizer.zero_grad()

        # Stage 0 returns pass_index (anchors kept for the refine stage),
        # which gates stage 1's matching.
        loss_l[0], loss_c[0], pass_index = criterion[0](out[0], priors, targets[0])
        loss[0] = loss_l[0] + loss_c[0]

        loss_l[1], loss_c[1], _ = criterion[1](out[1], r_priors, targets[1], pass_index)
        loss[1] = loss_l[1] + loss_c[1]


        # Joint backward over both stages.
        loss_total = loss[0] + loss[1]
        loss_total.backward()
        optimizer.step()
        t1 = time.time()
        # loc_loss += loss_l.data[0]
        # conf_loss += loss_c.data[0]
        load_t1 = time.time()
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                  + '|| Totel iter ' +
                  repr(iteration) + ' || L1: %.4f C1: %.4f||' % (loss_l[0].data[0],loss_c[0].data[0]) + 
                  ' || L2: %.4f C2: %.4f||' % (loss_l[1].data[0],loss_c[1].data[0]) + 
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))
            f_writer.write('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                  + '|| Totel iter ' +
                  repr(iteration) + ' || L1: %.4f C1: %.4f||' % (loss_l[0].data[0],loss_c[0].data[0]) + 
                  ' || L2: %.4f C2: %.4f||' % (loss_l[1].data[0],loss_c[1].data[0]) + 
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr) + '\n')

    torch.save(net.state_dict(), args.save_folder +
               'Final_' + args.version +'_' + args.dataset+ '_refine_agnostic_{}.pth.{}'.format(C_agnostic, args.extra))

    f_writer.write('training finished!\n')
    f_writer.close()