Example #1
0
def main():
    """Evaluate saved SSD checkpoints on the test split.

    Reads all configuration from the module-level ``args`` namespace; for
    every iteration listed in ``args.eval_iter`` it loads the matching
    ``.pth`` checkpoint, builds the test dataset and runs ``test_net``,
    logging results to a per-iteration log file under
    ``<save_root>/cache/<exp_name>/``.
    """

    # BGR channel means subtracted during preprocessing.
    args.means = (104, 117, 123)  # only support voc now

    # Experiment name encodes net/dataset/input/batch-size/backbone/lr so
    # checkpoints and logs from different runs never collide.
    exp_name = '{}-SSD-{}-{}-bs-{}-{}-lr-{:05d}'.format(
        args.net_type, args.dataset, args.input_type, args.batch_size,
        args.cfg['base'], int(args.lr * 100000))

    args.save_root += args.dataset + '/'
    args.data_root += args.dataset + '/'
    args.listid = '01'  ## would be usefull in JHMDB-21
    print('Exp name', exp_name, args.listid)

    # Evaluate every checkpoint iteration requested via --eval_iter
    # (comma-separated; empty entries are ignored).
    for iteration in [
            int(itr) for itr in args.eval_iter.split(',') if len(itr) > 0
    ]:
        # Line-buffered (buffering=1) so progress is visible while running.
        log_file = open(
            args.save_root + 'cache/' + exp_name +
            "/testing-{:d}-{:0.2f}.log".format(iteration, args.iou_thresh),
            "w", 1)
        log_file.write(exp_name + '\n')
        trained_model_path = args.save_root + 'cache/' + exp_name + '/ssd300_ucf24_' + repr(
            iteration) + '.pth'
        log_file.write(trained_model_path + '\n')
        num_classes = len(CLASSES) + 1  #7 +1 background
        # Pick the architecture matching the config used at training time.
        if args.cfg['base'] == 'fpn':
            net = FPNSSD512(num_classes, args.cfg)
        else:
            if args.cfg['min_dim'][0] == 512:
                net = SSD512(num_classes, args.cfg)
            else:
                net = build_vgg_ssd(num_classes, args.cfg)
        net.load_state_dict(torch.load(trained_model_path))
        net.eval()  # inference mode: disables dropout / fixes batch-norm stats
        if args.cuda:
            net = net.cuda()
            cudnn.benchmark = True  # fixed input size, let cuDNN autotune
        print('Finished loading model %d !' % iteration)
        # Load dataset
        if args.dataset == 'ucf24':
            dataset = OmniUCF24(args.data_root,
                                'test',
                                BaseTransform(300, args.means),
                                AnnotationTransform(),
                                input_type=args.input_type,
                                outshape=args.outshape,
                                full_test=True)
        else:
            dataset = OmniJHMDB(args.data_root,
                                'test',
                                BaseTransform(300, None),
                                AnnotationTransform(),
                                outshape=args.outshape)

        # evaluation
        torch.cuda.synchronize()
        tt0 = time.perf_counter()  # NOTE(review): tt0 is never read afterwards — timing looks unfinished
        log_file.write('Testing net \n')
        test_net(net, args.save_root, exp_name, args.input_type, dataset,
                 iteration, num_classes, log_file)

        log_file.close()
Example #2
0
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    evaluate_detections(all_boxes, output_dir, dataset)


def evaluate_detections(box_list, output_dir, dataset):
    """Dump detections as VOC-format result files, then score them with the VOC eval."""
    write_voc_results_file(box_list, dataset)
    do_python_eval(output_dir)


if __name__ == '__main__':
    # load net
    num_classes = len(labelmap) + 1                      # +1 for background
    net = build_ssd('test', 300, num_classes)            # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()  # inference mode: disables dropout / fixes batch-norm stats
    print('Finished loading model!')
    # load data: VOC2007 test images with the mean-subtraction transform
    dataset = VOCDetection(args.voc_root, [('2007', set_type)],
                           BaseTransform(300, dataset_mean),
                           VOCAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True  # fixed input size, let cuDNN autotune conv kernels
    # evaluation
    test_net(args.save_folder, net, args.cuda, dataset,
             BaseTransform(net.size, dataset_mean), args.top_k, 300,
             thresh=args.confidence_threshold)
Example #3
0
     head = k[:7]
     if head == 'module.':
         name = k[7:]  # remove `module.`
     else:
         name = k
     new_state_dict[name] = v
 net.load_state_dict(new_state_dict)
 net.eval()
 print('Finished loading model!')
 if args.cuda:
     net = net.cuda()
     cudnn.benchmark = True
 else:
     net = net.cpu()
 detector = Detect(num_classes, 0, cfg)
 transform = BaseTransform(img_dim, rgb_means, (2, 0, 1))
 cap = cv2.VideoCapture('11.mp4')
 #cap1 = cv2.VideoCapture('rtsp://*****:*****@101.205.119.109:554/Streaming/Channels/301')
 ret, image = cap.read()
 x = transform(image).unsqueeze(0)
 x = x.cuda()
 model_trt = torch2trt(net, [x])
 object_detector = ObjectDetector(model_trt, detector, transform)
 img_list = os.listdir(args.img_dir)
 frame_no = 0
 fourcc = cv2.VideoWriter_fourcc(*'MJPG')
 output = cv2.VideoWriter("demo1.avi", fourcc, 20, (1280, 720))
 while True:
     start = time.time()
     frame_no += 1
     #print(frame_no)
Example #4
0
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    # with open(det_file, 'rb') as f:
    #     all_boxes = pickle.load(f)
    # print('LOADED')
    dataset.evaluate_detections(all_boxes, save_folder)

if __name__ == '__main__':
    # load net
    num_classes = len(labelmap) + 1                      # +1 for background
    net = build_ssd('test', cfg, args.use_pred_module)            # initialize SSD
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()  # inference mode: disables dropout / fixes batch-norm stats
    print('Finished loading model!')
    print(net)

    # load data: COCO minival2014 split with the mean-subtraction transform
    dataset = COCODetection(args.dataset_root,
                            image_set='minival2014',
                            transform=BaseTransform(cfg['min_dim'], MEANS),
                            target_transform=COCOAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True  # fixed input size, let cuDNN autotune conv kernels

    # evaluation
    test_net(args.save_folder, net, args.cuda, dataset,
             BaseTransform(net.size, MEANS), args.top_k, 512,
             thresh=args.confidence_threshold)
Example #5
0
File: train.py  Project: Areslala/code
def main():
    """Train an SSD detector according to the config file given on the command line.

    Builds the network, optionally restores a checkpoint, then alternates
    training epochs with periodic checkpointing and (late in training)
    evaluation on the validation split.  Relies on module-level imports of
    the cfg object, dataset classes and the train/eval helpers.
    """
    global args
    args = arg_parse()
    cfg_from_file(args.cfg_file)
    save_folder = args.save_folder
    batch_size = cfg.TRAIN.BATCH_SIZE
    bgr_means = cfg.TRAIN.BGR_MEAN
    # Probability handed to the training-time `preproc` augmentation
    # (presumably the chance of applying distortion — confirm in preproc).
    p = 0.6
    gamma = cfg.SOLVER.GAMMA
    momentum = cfg.SOLVER.MOMENTUM
    weight_decay = cfg.SOLVER.WEIGHT_DECAY
    size = cfg.MODEL.SIZE
    thresh = cfg.TEST.CONFIDENCE_THRESH
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        top_k = 200
    else:
        trainvalDataset = COCODetection
        top_k = 300
    dataset_name = cfg.DATASETS.DATA_TYPE
    dataroot = cfg.DATASETS.DATAROOT
    trainSet = cfg.DATASETS.TRAIN_TYPE
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    start_epoch = args.resume_epoch
    epoch_step = cfg.SOLVER.EPOCH_STEPS
    end_epoch = cfg.SOLVER.END_EPOCH
    # exist_ok avoids the check-then-create race of os.path.exists + os.mkdir.
    os.makedirs(save_folder, exist_ok=True)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    net = SSD(cfg)
    print(net)
    if cfg.MODEL.SIZE == '300':
        size_cfg = cfg.SMALL
    else:
        size_cfg = cfg.BIG
    optimizer = optim.SGD(net.parameters(),
                          lr=cfg.SOLVER.BASE_LR,
                          momentum=momentum,
                          weight_decay=weight_decay)
    net.apply(weights_init)
    mydic = net.state_dict()
    if args.resume_net is not None:  # `is not None`, not `!= None` (PEP 8)
        checkpoint = torch.load(args.resume_net)
        state_dict = checkpoint['model']
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        # Keep only weights whose names exist in the current model.
        state_dict = {k: v for k, v in state_dict.items() if k in mydic}

        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.` prefix left by DataParallel
            else:
                name = k
            new_state_dict[name] = v

        mydic.update(new_state_dict)

        net.load_state_dict(mydic)
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('Loading resume network...')
    if args.ngpu > 1:
        net = torch.nn.DataParallel(net)
    net.cuda()
    cudnn.benchmark = True

    # RefineDet-style models need two losses (ARM + ODM); plain SSD needs one.
    criterion = list()
    if cfg.MODEL.REFINE:
        detector = Detect(cfg)
        arm_criterion = RefineMultiBoxLoss(cfg, 2)
        odm_criterion = RefineMultiBoxLoss(cfg, cfg.MODEL.NUM_CLASSES)
        criterion.append(arm_criterion)
        criterion.append(odm_criterion)
    else:
        detector = Detect(cfg)
        ssd_criterion = MultiBoxLoss(cfg)
        criterion.append(ssd_criterion)

    TrainTransform = preproc(size_cfg.IMG_WH, bgr_means, p)
    ValTransform = BaseTransform(size_cfg.IMG_WH, bgr_means, (2, 0, 1))

    val_dataset = trainvalDataset(dataroot, valSet, ValTransform, dataset_name)
    val_loader = data.DataLoader(val_dataset,
                                 batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=detection_collate)

    for epoch in range(start_epoch + 1, end_epoch + 1):
        # Re-built every epoch so augmentation re-samples the data ordering.
        train_dataset = trainvalDataset(dataroot, trainSet, TrainTransform,
                                        dataset_name)
        train_loader = data.DataLoader(train_dataset,
                                       batch_size,
                                       shuffle=True,
                                       num_workers=args.num_workers,
                                       collate_fn=detection_collate)
        train(train_loader, net, criterion, optimizer, epoch, epoch_step,
              gamma, end_epoch, cfg)
        # Checkpoint every 30 epochs, and every 10 once past epoch 400.
        if (epoch % 30 == 0) or (epoch % 10 == 0 and epoch >= 400):
            time.sleep(10)
            save_checkpoint(net, epoch, size, optimizer)
        # Evaluation is expensive, so only run it late and infrequently.
        if (epoch >= 200 and epoch % 50 == 0):
            eval_net(val_dataset,
                     val_loader,
                     net,
                     detector,
                     cfg,
                     ValTransform,
                     top_k,
                     thresh=thresh,
                     batch_size=batch_size)
    time.sleep(10)
    save_checkpoint(net, end_epoch, size, optimizer)
Example #6
0
def main():
    """Parse CLI options, build/restore the SSD model and run epoch-based training.

    Publishes several objects as module-level globals (``args``, dataset
    bookkeeping, ``best_prec1`` ...) because ``train``/``validate`` read them.
    """
    global my_dict, keys, k_len, arr, xxx, args, log_file, best_prec1
    relative_path = '/data4/lilin/my_code'
    parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training')
    parser.add_argument('--version', default='v2', help='conv11_2(v2) or pool6(v1) as last layer')
    parser.add_argument('--basenet', default='vgg16_reducedfc.pth', help='pretrained base model')
    parser.add_argument('--dataset', default='ucf24', help='pretrained base model')
    parser.add_argument('--ssd_dim', default=300, type=int, help='Input Size for SSD')  # only support 300 now
    parser.add_argument('--modality', default='rgb', type=str,
                        help='INput tyep default rgb options are [rgb,brox,fastOF]')
    parser.add_argument('--jaccard_threshold', default=0.5, type=float, help='Min Jaccard index for matching')
    parser.add_argument('--batch_size', default=32, type=int, help='Batch size for training')
    parser.add_argument('--num_workers', default=0, type=int, help='Number of workers used in dataloading')
    parser.add_argument('--max_iter', default=120000, type=int, help='Number of training iterations')
    parser.add_argument('--man_seed', default=123, type=int, help='manualseed for reproduction')
    parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda to train model')
    parser.add_argument('--ngpu', default=1, type=str2bool, help='Use cuda to train model')
    parser.add_argument('--base_lr', default=0.0005, type=float, help='initial learning rate')
    parser.add_argument('--lr', default=0.0005, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay for SGD')
    parser.add_argument('--gamma', default=0.2, type=float, help='Gamma update for SGD')
    parser.add_argument('--log_iters', default=True, type=bool, help='Print the loss at each iteration')
    parser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom to for loss visualization')
    parser.add_argument('--data_root', default= relative_path + '/realtime/', help='Location of VOC root directory')
    parser.add_argument('--save_root', default= relative_path + '/realtime/saveucf24/',
                        help='Location to save checkpoint models')
    parser.add_argument('--iou_thresh', default=0.5, type=float, help='Evaluation threshold')
    parser.add_argument('--conf_thresh', default=0.01, type=float, help='Confidence threshold for evaluation')
    parser.add_argument('--nms_thresh', default=0.45, type=float, help='NMS threshold')
    parser.add_argument('--topk', default=50, type=int, help='topk for evaluation')
    parser.add_argument('--clip_gradient', default=40, type=float, help='gradients clip')
    parser.add_argument('--resume', default=None,type=str, help='Resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--epochs', default=35, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--eval_freq', default=2, type=int, metavar='N', help='evaluation frequency (default: 5)')
    parser.add_argument('--snapshot_pref', type=str, default="ucf101_vgg16_ssd300_end2end")
    parser.add_argument('--lr_milestones', default=[-2, -5], type=float, help='initial learning rate')
    parser.add_argument('--arch', type=str, default="VGG16")
    parser.add_argument('--Finetune_SSD', default=False, type=str)
    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument(
        '--step',
        type=int,
        default=[18, 27],
        nargs='+',
        help='the epoch where optimizer reduce the learning rate')
    parser.add_argument('--log_lr', default=False, type=str2bool, help='Use cuda to train model')
    parser.add_argument(
        '--print-log',
        type=str2bool,
        default=True,
        help='print logging or not')
    parser.add_argument(
        '--end2end',
        type=str2bool,
        default=False,
        help='print logging or not')

    ## Parse arguments
    args = parser.parse_args()

    print(__file__)
    file_name = (__file__).split('/')[-1]
    file_name = file_name.split('.')[0]
    print_log(args, file_name)
    ## set random seeds for reproducibility
    np.random.seed(args.man_seed)
    torch.manual_seed(args.man_seed)
    if args.cuda:
        torch.cuda.manual_seed_all(args.man_seed)

    if args.cuda and torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    args.cfg = v2
    args.train_sets = 'train'
    args.means = (104, 117, 123)  # BGR channel means subtracted during preprocessing
    num_classes = len(CLASSES) + 1  # +1 for background
    args.num_classes = num_classes
    args.loss_reset_step = 30
    args.eval_step = 10000
    args.print_step = 10
    args.data_root += args.dataset + '/'

    ## Experiment name used for checkpoint/log paths.
    args.snapshot_pref = ('ucf101_CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}').format(args.dataset,
                args.modality, args.batch_size, args.basenet[:-14], int(args.lr*100000))
    print_log(args, args.snapshot_pref)

    if not os.path.isdir(args.save_root):
        os.makedirs(args.save_root)

    net = build_ssd(300, args.num_classes)

    # NOTE(review): --Finetune_SSD is declared with type=str, so any value
    # passed on the command line is a string and `is True` never matches it;
    # this branch only fires if the default is replaced programmatically.
    if args.Finetune_SSD is True:
        print_log(args, "load snapshot")
        pretrained_weights = "/home2/lin_li/zjg_code/realtime/ucf24/rgb-ssd300_ucf24_120000.pth"
        pretrained_dict = torch.load(pretrained_weights)
        model_dict = net.state_dict()  # 1. filter out unnecessary keys
        pretrained_dict_2 = {k: v for k, v in pretrained_dict.items() if k in model_dict } # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict_2) # 3. load the new state dict
    elif args.resume is not None:
        if os.path.isfile(args.resume):
            print_log(args, ("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            if args.end2end is False:
                args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            net.load_state_dict(checkpoint['state_dict'])
            # Bug fix: this message previously printed args.evaluate instead of
            # the checkpoint path that was actually loaded.
            print_log(args, ("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch'])))
        else:
            print_log(args, ("=> no checkpoint found at '{}'".format(args.resume)))

    elif args.modality == 'fastOF':
        print_log(args, 'Download pretrained brox flow trained model weights and place them at:::=> ' + args.data_root + 'ucf24/train_data/brox_wieghts.pth')
        pretrained_weights = args.data_root + 'train_data/brox_wieghts.pth'
        print_log(args, 'Loading base network...')
        net.load_state_dict(torch.load(pretrained_weights))
    else:
        vgg_weights = torch.load(args.data_root +'train_data/' + args.basenet)
        print_log(args, 'Loading base network...')
        net.vgg.load_state_dict(vgg_weights)

    if args.cuda:
        net = net.cuda()

    def xavier(param):
        # Xavier/Glorot initialisation for conv weights.
        init.xavier_uniform(param)

    def weights_init(m):
        if isinstance(m, nn.Conv2d):
            xavier(m.weight.data)
            m.bias.data.zero_()

    print_log(args, 'Initializing weights for extra layers and HEADs...')
    # initialize newly added layers' weights with xavier method
    if args.Finetune_SSD is False and args.resume is None:
        print_log(args, "init layers")
        net.extras.apply(weights_init)
        net.loc.apply(weights_init)
        net.conf.apply(weights_init)

    parameter_dict = dict(net.named_parameters()) # Get parmeter of network in dictionary format wtih name being key
    params = []

    # Set a doubled learning rate (and no weight decay) on bias parameters, and
    # freeze the early VGG layers unless training end-to-end.
    for name, param in parameter_dict.items():
        if args.end2end is False and name.find('vgg') > -1 and int(name.split('.')[1]) < 23:# :and name.find('cell') <= -1
            param.requires_grad = False
            print_log(args, name + 'layer parameters will be fixed')
        else:
            if name.find('bias') > -1:
                print_log(args, name + 'layer parameters will be trained @ {}'.format(args.lr*2))
                params += [{'params': [param], 'lr': args.lr*2, 'weight_decay': 0}]
            else:
                print_log(args, name + 'layer parameters will be trained @ {}'.format(args.lr))
                params += [{'params':[param], 'lr': args.lr, 'weight_decay':args.weight_decay}]

    optimizer = optim.SGD(params, lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
    criterion = MultiBoxLoss(args.num_classes, 0.5, True, 0, True, 3, 0.5, False, args.cuda)

    scheduler = None
    # scheduler = MultiStepLR(optimizer, milestones=args.step, gamma=args.gamma)

    print_log(args, 'Loading Dataset...')
    train_dataset = UCF24Detection(args.data_root, args.train_sets, SSDAugmentation(args.ssd_dim, args.means),
                                   AnnotationTransform(), input_type=args.modality)
    val_dataset = UCF24Detection(args.data_root, 'test', BaseTransform(args.ssd_dim, args.means),
                                 AnnotationTransform(), input_type=args.modality,
                                 full_test=False)

    train_data_loader = data.DataLoader(train_dataset, args.batch_size, num_workers=args.num_workers,
                                  shuffle=False, collate_fn=detection_collate, pin_memory=True)
    val_data_loader = data.DataLoader(val_dataset, args.batch_size, num_workers=args.num_workers,
                                 shuffle=False, collate_fn=detection_collate, pin_memory=True)

    print_log(args, "train epoch_size: " + str(len(train_data_loader)))
    print_log(args, 'Training SSD on' + train_dataset.name)

    # Snapshot dataset bookkeeping into globals read by train()/validate().
    my_dict = copy.deepcopy(train_data_loader.dataset.train_vid_frame)
    keys = list(my_dict.keys())
    k_len = len(keys)
    arr = np.arange(k_len)
    xxx = copy.deepcopy(train_data_loader.dataset.ids)
    print_log(args, args.snapshot_pref)
    for arg in vars(args):
        print(arg, getattr(args, arg))
        print_log(args, str(arg)+': '+str(getattr(args, arg)))

    print_log(args, str(net))

    torch.cuda.synchronize()
    for epoch in range(args.start_epoch, args.epochs):

        train(train_data_loader, net, criterion, optimizer, epoch, scheduler)
        print_log(args, 'Saving state, epoch:' + str(epoch))

        # NOTE(review): best_prec1 is only assigned here when resuming; it is
        # assumed to be initialized at module level otherwise — confirm.
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': net.state_dict(),
            'best_prec1': best_prec1,
        }, epoch = epoch)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            torch.cuda.synchronize()
            tvs = time.perf_counter()
            mAP, ap_all, ap_strs = validate(args, net, val_data_loader, val_dataset, epoch, iou_thresh=args.iou_thresh)
            # remember best prec@1 and save checkpoint
            is_best = mAP > best_prec1
            best_prec1 = max(mAP, best_prec1)
            print_log(args, 'Saving state, epoch:' +str(epoch))
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': net.state_dict(),
                'best_prec1': best_prec1,
            }, is_best,epoch)

            for ap_str in ap_strs:
                print_log(args, ap_str)
            ptr_str = '\nMEANAP:::=>'+str(mAP)
            print_log(args, ptr_str)

            torch.cuda.synchronize()
            t0 = time.perf_counter()
            prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0-tvs)
            # Bug fix: previously logged ptr_str (the mAP line) a second time
            # instead of the validation-time string built just above.
            print_log(args, prt_str)
def train(args, net, optimizer, criterion, scheduler):
    """Run the SSD training loop with periodic logging, validation and checkpointing.

    Trains for up to ``args.max_iter`` iterations, logging running losses every
    ``args.print_step`` iterations and saving/validating every
    ``args.eval_step`` iterations (plus once at iteration 5000).
    """
    # Line-buffered (buffering=1) log so progress is visible while running.
    log_file = open(args.save_root + "training.log", "w", 1)
    log_file.write(args.exp_name + '\n')
    for arg in vars(args):
        print(arg, getattr(args, arg))
        log_file.write(str(arg) + ': ' + str(getattr(args, arg)) + '\n')
    log_file.write(str(net))
    net.train()

    # loss counters
    batch_time = AverageMeter()
    losses = AverageMeter()
    loc_losses = AverageMeter()
    cls_losses = AverageMeter()

    print('Loading Dataset...')
    train_dataset = UCF24Detection(args.data_root,
                                   args.train_sets,
                                   SSDAugmentation(args.ssd_dim, args.means),
                                   AnnotationTransform(),
                                   input_type=args.input_type)
    val_dataset = UCF24Detection(args.data_root,
                                 'test',
                                 BaseTransform(args.ssd_dim, args.means),
                                 AnnotationTransform(),
                                 input_type=args.input_type,
                                 full_test=False)
    epoch_size = len(train_dataset) // args.batch_size
    print('len(train_dataset) : ', len(train_dataset))
    print('epoch_size : ', epoch_size)
    # Bug fix: an unconditional debug exit() here aborted training right after
    # printing the epoch size, leaving the whole loop below unreachable.
    print('Training SSD on', train_dataset.name)

    if args.visdom:

        import visdom
        viz = visdom.Visdom()
        viz.port = args.vis_port
        viz.env = args.exp_name
        # initialize visdom loss plot
        lot = viz.line(X=torch.zeros((1, )).cpu(),
                       Y=torch.zeros((1, 6)).cpu(),
                       opts=dict(xlabel='Iteration',
                                 ylabel='Loss',
                                 title='Current SSD Training Loss',
                                 legend=[
                                     'REG', 'CLS', 'AVG', 'S-REG', ' S-CLS',
                                     ' S-AVG'
                                 ]))
        # initialize visdom meanAP and class APs plot
        legends = ['meanAP']
        for cls in CLASSES:
            legends.append(cls)
        val_lot = viz.line(X=torch.zeros((1, )).cpu(),
                           Y=torch.zeros((1, args.num_classes)).cpu(),
                           opts=dict(xlabel='Iteration',
                                     ylabel='Mean AP',
                                     title='Current SSD Validation mean AP',
                                     legend=legends))

    train_data_loader = data.DataLoader(train_dataset,
                                        args.batch_size,
                                        num_workers=args.num_workers,
                                        shuffle=True,
                                        collate_fn=detection_collate,
                                        pin_memory=True)
    val_data_loader = data.DataLoader(val_dataset,
                                      args.batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False,
                                      collate_fn=detection_collate,
                                      pin_memory=True)
    itr_count = 0
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    iteration = 0
    while iteration <= args.max_iter:
        for i, (images, targets, img_indexs) in enumerate(train_data_loader):

            if iteration > args.max_iter:
                break
            iteration += 1
            if args.cuda:
                images = Variable(images.cuda())
                targets = [
                    Variable(anno.cuda(), volatile=True) for anno in targets
                ]
            else:
                images = Variable(images)
                targets = [Variable(anno, volatile=True) for anno in targets]
            # forward
            out = net(images)
            # backprop
            optimizer.zero_grad()

            loss_l, loss_c = criterion(out, targets)
            loss = loss_l + loss_c
            loss.backward()
            optimizer.step()
            if scheduler is not None:  # scheduler is optional; callers may pass None
                scheduler.step()
            loc_loss = loss_l.data[0]
            conf_loss = loss_c.data[0]
            loc_losses.update(loc_loss)
            cls_losses.update(conf_loss)
            losses.update((loc_loss + conf_loss) / 2.0)

            if iteration % args.print_step == 0 and iteration > 0:
                if args.visdom:
                    losses_list = [
                        loc_losses.val, cls_losses.val, losses.val,
                        loc_losses.avg, cls_losses.avg, losses.avg
                    ]
                    viz.line(X=torch.ones((1, 6)).cpu() * iteration,
                             Y=torch.from_numpy(
                                 np.asarray(losses_list)).unsqueeze(0).cpu(),
                             win=lot,
                             update='append')

                torch.cuda.synchronize()
                t1 = time.perf_counter()
                batch_time.update(t1 - t0)

                print_line = 'Itration {:06d}/{:06d} loc-loss {:.3f}({:.3f}) cls-loss {:.3f}({:.3f}) ' \
                             'average-loss {:.3f}({:.3f}) Timer {:0.3f}({:0.3f})'.format(
                              iteration, args.max_iter, loc_losses.val, loc_losses.avg, cls_losses.val,
                              cls_losses.avg, losses.val, losses.avg, batch_time.val, batch_time.avg)

                torch.cuda.synchronize()
                t0 = time.perf_counter()
                log_file.write(print_line + '\n')
                print(print_line)

                itr_count += 1

                # Periodically reset running averages so the (avg) columns
                # track recent behaviour rather than the whole run.
                if itr_count % args.loss_reset_step == 0 and itr_count > 0:
                    loc_losses.reset()
                    cls_losses.reset()
                    losses.reset()
                    batch_time.reset()
                    print('Reset accumulators of ', args.exp_name, ' at',
                          itr_count * args.print_step)
                    itr_count = 0

            if (iteration % args.eval_step == 0
                    or iteration == 5000) and iteration > 0:
                torch.cuda.synchronize()
                tvs = time.perf_counter()
                print('Saving state, iter:', iteration)
                torch.save(
                    net.state_dict(), args.save_root + 'ssd300_ucf24_' +
                    repr(iteration) + '.pth')

                net.eval()  # switch net to evaluation mode
                mAP, ap_all, ap_strs = validate(args,
                                                net,
                                                val_data_loader,
                                                val_dataset,
                                                iteration,
                                                iou_thresh=args.iou_thresh)

                for ap_str in ap_strs:
                    print(ap_str)
                    log_file.write(ap_str + '\n')
                ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
                print(ptr_str)
                log_file.write(ptr_str)

                if args.visdom:
                    aps = [mAP]
                    for ap in ap_all:
                        aps.append(ap)
                    viz.line(
                        X=torch.ones((1, args.num_classes)).cpu() * iteration,
                        Y=torch.from_numpy(np.asarray(aps)).unsqueeze(0).cpu(),
                        win=val_lot,
                        update='append')
                net.train()  # Switch net back to training mode
                torch.cuda.synchronize()
                t0 = time.perf_counter()
                prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0 - tvs)
                print(prt_str)
                # Bug fix: previously wrote ptr_str (the mAP line) again
                # instead of the validation-time string built just above.
                log_file.write(prt_str)

    log_file.close()
Example #8
0
def main():
    """Build datasets, FPN-SSD network, loss and optimizer from the module-level
    ``args`` namespace, then launch training.

    Mutates ``args`` in place (normalised flags, derived paths, anchor counts,
    class lists) and relies on module-level helpers such as ``Detection``,
    ``build_fpn_shared_heads`` and ``train``.
    """
    args.step_values = [int(val) for val in args.step_values.split(',')]
    # args.loss_reset_step = 10
    args.log_step = 10
    args.dataset = args.dataset.lower()
    args.basenet = args.basenet.lower()

    args.bn = abs(args.bn)  # 0 freeze or else use bn
    if args.bn > 0:
        args.bn = 1  # update bn layer set the flag to 1

    args.shared_heads = abs(
        args.shared_heads)  # 0 no sharing of feature else yes
    if args.shared_heads > 0:
        args.shared_heads = 1

    args.exp_name = 'FPN{:d}-{:s}sh{:02d}-{:s}-bs{:02d}-{:s}-{:s}-lr{:05d}-bn{:d}'.format(
        args.input_dim, args.anchor_type, args.shared_heads,
        args.dataset, args.batch_size, args.basenet, args.loss_type,
        int(args.lr * 100000), args.bn)

    args.save_root += args.dataset + '/'
    args.save_root = args.save_root + 'cache/' + args.exp_name + '/'

    if not os.path.isdir(
            args.save_root):  # if save directory doesn't exist create it
        os.makedirs(args.save_root)

    source_dir = args.save_root + '/source/'  # where to save the source
    utils.copy_source(source_dir)

    anchors = 'None'
    with torch.no_grad():  # anchor generation needs no autograd tracking
        if args.anchor_type == 'kmeans':
            anchorbox = kanchorBoxes(input_dim=args.input_dim,
                                     dataset=args.dataset)
        else:
            anchorbox = anchorBox(args.anchor_type,
                                  input_dim=args.input_dim,
                                  dataset=args.dataset)
        anchors = anchorbox.forward()
        args.ar = anchorbox.ar

    args.num_anchors = anchors.size(0)

    if args.dataset == 'coco':
        args.train_sets = ['train2017']
        args.val_sets = ['val2017']
    else:
        args.train_sets = ['train2007', 'val2007', 'train2012', 'val2012']
        args.val_sets = ['test2007']

    # ImageNet channel statistics used for input normalisation.
    args.means = [0.485, 0.456, 0.406]
    args.stds = [0.229, 0.224, 0.225]

    print('\nLoading Datasets')
    train_dataset = Detection(args,
                              train=True,
                              image_sets=args.train_sets,
                              transform=Augmentation(args.input_dim,
                                                     args.means, args.stds))
    print('Done Loading Dataset Train Dataset :::>>>\n',
          train_dataset.print_str)
    val_dataset = Detection(args,
                            train=False,
                            image_sets=args.val_sets,
                            transform=BaseTransform(args.input_dim, args.means,
                                                    args.stds),
                            full_test=False)
    print('Done Loading Dataset Validation Dataset :::>>>\n',
          val_dataset.print_str)

    args.num_classes = len(train_dataset.classes) + 1  # +1 for background
    args.classes = train_dataset.classes
    args.bias_heads = args.bias_heads > 0
    args.head_size = 256

    if args.shared_heads > 0:
        net = build_fpn_shared_heads(args.basenet,
                                     args.model_dir,
                                     ar=args.ar,
                                     head_size=args.head_size,
                                     num_classes=args.num_classes,
                                     bias_heads=args.bias_heads)
    else:
        net = build_fpn_unshared(args.basenet,
                                 args.model_dir,
                                 ar=args.ar,
                                 head_size=args.head_size,
                                 num_classes=args.num_classes,
                                 bias_heads=args.bias_heads)

    net = net.cuda()

    if args.ngpu > 1:
        print('\nLets do dataparallel\n')
        net = torch.nn.DataParallel(net)

    if args.loss_type == 'mbox':
        criterion = MultiBoxLoss()
        optimizer = optim.SGD(net.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.loss_type == 'yolo':
        criterion = YOLOLoss()
        optimizer = optim.Adam(net.parameters())
    elif args.loss_type == 'focal':
        # BUGFIX: this branch previously re-tested 'yolo', making the
        # FocalLoss path unreachable dead code.
        criterion = FocalLoss()
        optimizer = optim.Adam(net.parameters())
    else:
        error('Define correct loss type')

    scheduler = MultiStepLR(optimizer,
                            milestones=args.step_values,
                            gamma=args.gamma)

    train(args, net, anchors, optimizer, criterion, scheduler, train_dataset,
          val_dataset)
示例#9
0
# cfg = VOC_300
cfg = COCO_512

priorbox = PriorBox(cfg)
with torch.no_grad():
    priors = priorbox.forward()
    if args.cuda:
        priors = priors.cuda()
# numclass = 21
numclass = 81
start_load = time.time()
img = cv2.imread(args.img)
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
net = build_net('test', args.size, numclass)  # initialize detector

transform = BaseTransform(net.size, (123, 117, 104), (2, 0, 1))
with torch.no_grad():
    x = transform(img).unsqueeze(0)
    if args.cuda:
        x = x.cuda()
        scale = scale.cuda()
state_dict = torch.load(args.trained_model, map_location='cpu')
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict

new_state_dict = OrderedDict()
for k, v in state_dict.items():
    head = k[:7]
    if head == 'module.':
        name = k[7:]  # remove `module.`
    else:
示例#10
0
                        f.write(
                            str(pred_num) + ' label: ' + label_name +
                            ' score: ' + str(score) + ' ' +
                            ' || '.join(str(c) for c in coords) + '\n')
                    j += 1
        except:
            continue


if __name__ == '__main__':
    # Build the SSD300 detector for the SVG label set (+1 for background).
    n_classes = len(SVG_CLASSES) + 1
    net = build_ssd('test', 300, n_classes)
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')
    # Move to GPU before evaluation if requested.
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # Raw test split; pixel preprocessing happens inside test_net via
    # BaseTransform below.
    testset = SVGDetection(args.svg_root, image_set, None, LabelTransform())
    # Score every test image and write detections to the save folder.
    test_net(args.save_folder,
             net,
             args.cuda,
             testset,
             BaseTransform(net.size, (102, 102, 102)),
             thresh=args.visual_threshold)
示例#11
0

def evaluate_detections(box_list, output_dir, dataset):
    """Persist *box_list* as VOC-format result files, then score them.

    ``write_voc_results_file`` dumps per-class detections for *dataset*;
    ``do_python_eval`` reads them back and reports metrics into *output_dir*.
    """
    write_voc_results_file(box_list, dataset)
    do_python_eval(output_dir)


if __name__ == '__main__':
    # Instantiate the 512-input SSD for all VOC classes plus background.
    n_classes = len(VOC_CLASSES) + 1
    net = build_ssd('test', 512, n_classes)
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    log.l.info('Finished loading model!')
    # Optionally move the network to GPU before building the dataset.
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # VOC2007 test split with mean subtraction applied at load time.
    dataset = VOCDetection(args.voc_root, [('2007', set_type)],
                           BaseTransform(512, dataset_mean),
                           AnnotationTransform())
    # Run detection over the whole split and report metrics.
    test_net(args.save_folder,
             net,
             args.cuda,
             dataset,
             BaseTransform(net.size, dataset_mean),
             args.top_k,
             512,
             thresh=args.confidence_threshold)
示例#12
0
        Map_loc = lambda storage, loc: storage
    else:
        Map_loc = 'cpu'
    state_dict = torch.load(configs.eval.model_name, map_location=Map_loc)
    if 'net_state' in state_dict.keys():
        state_dict = state_dict['net_state']
    net.load_state_dict(state_dict)
    net.eval()

    if configs.eval.cuda:
        net = net.cuda()
        cudnn.benchmark = True

    # Load dataset.
    dataset = TreeDataset(configs.dataset,
                          transform=BaseTransform(configs.model.input_size,
                                                  configs.model.pixel_means))

    # Detect objects.
    if not os.path.isfile(ALL_DETECTIONS_FILEPATH):
        detect_objects(configs, net, dataset)
    elif configs.eval.overwrite_all_detections:
        print('Overwriting detections in:')
        countdown(5)
        detect_objects(configs, net, dataset)
    else:
        print("{} has been detected. Skipping object detections.".format(
            ALL_DETECTIONS_FILEPATH))

    # Evaluate detections
    evaluate_detections(dataset, configs)
示例#13
0
    def __init__(self):
        """Wire up data loaders, model, priors, optimizer, scheduler, loss and
        logging from the module-level ``cfg``; creates output and log
        directories as needed.
        """
        self.cfg = cfg

        # Load data -- a loader is built only for the phases listed in cfg.phase.
        print('===> Loading data')
        self.train_loader = load_data(
            cfg.dataset, 'train') if 'train' in cfg.phase else None
        self.eval_loader = load_data(cfg.dataset,
                                     'eval') if 'eval' in cfg.phase else None
        self.test_loader = load_data(cfg.dataset,
                                     'test') if 'test' in cfg.phase else None
        # self.visualize_loader = load_data(cfg.DATASET, 'visualize') if 'visualize' in cfg.PHASE else None

        # Build model
        print('===> Building model')
        self.base_trans = BaseTransform(cfg.image_size[0],
                                        cfg.network.rgb_means,
                                        cfg.network.rgb_std, (2, 0, 1))
        self.priors = PriorBox(cfg.anchor)
        # cfg.model names the network module; its build_net is resolved
        # dynamically via eval(), so cfg.model must be a trusted string.
        self.model = eval(cfg.model + '.build_net')(cfg.image_size[0],
                                                    cfg.dataset.num_classes)
        with torch.no_grad():
            # Materialise the prior boxes once; no gradients needed.
            self.priors = self.priors.forward()
        self.detector = Detect2(cfg.post_process)
        # Utilize GPUs for computation
        self.use_gpu = torch.cuda.is_available()
        # Either train all parameters or only the configured scope.
        if cfg.train.train_scope == '':
            trainable_param = self.model.parameters()
        else:
            trainable_param = self.trainable_param(cfg.train.train_scope)
        self.output_dir = os.path.join(cfg.output_dir, cfg.name, cfg.date)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.log_dir = os.path.join(self.output_dir, 'logs')
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        self.checkpoint = cfg.train.checkpoint

        previous = self.find_previous()
        # NOTE(review): the line below unconditionally discards the checkpoint
        # found above, so training always starts from self.initialize().
        # Looks like a leftover debug override -- confirm before relying on
        # checkpoint resumption.
        previous = False
        if previous:
            self.start_epoch = previous[0][-1]
            self.resume_checkpoint(previous[1][-1])
        else:
            self.start_epoch = self.initialize()
        if self.use_gpu:
            print('Utilize GPUs for computation')
            print('Number of GPU available', torch.cuda.device_count())
            self.model.cuda()
            self.priors.cuda()
            cudnn.benchmark = True
            if cfg.ngpu > 1:
                self.model = torch.nn.DataParallel(self.model,
                                                   device_ids=list(
                                                       range(cfg.ngpu)))
        # Print the model architecture and parameters
        #print('Model architectures:\n{}\n'.format(self.model))

        #print('Parameters and size:')
        #for name, param in self.model.named_parameters():
        #    print('{}: {}'.format(name, list(param.size())))
        # print trainable scope
        print('Trainable scope: {}'.format(cfg.train.train_scope))
        self.optimizer = self.configure_optimizer(trainable_param,
                                                  cfg.train.optimizer)
        self.exp_lr_scheduler = self.configure_lr_scheduler(
            self.optimizer, cfg.train.lr_scheduler)
        self.max_epochs = cfg.train.lr_scheduler.max_epochs
        # metric -- only the original MultiBox loss is supported.
        if cfg.network.multi_box_loss_type == 'origin':
            self.criterion = MultiBoxLoss2(cfg.matcher, self.priors,
                                           self.use_gpu)
        else:
            print('ERROR: ' + cfg.multi_box_loss_type + ' is not supported')
            sys.exit()
        # Set the logger
        self.writer = SummaryWriter(log_dir=self.log_dir)
        self.checkpoint_prefix = cfg.name + '_' + cfg.dataset.dataset
示例#14
0
def main():
    """Entry point: parse CLI args, build KINETICS train/val loaders, then run
    the SGD training loop with periodic validation, checkpointing and optional
    visdom plotting.

    Relies on module-level ``parser``, ``KINETICS``, ``initialise_model``,
    ``validate``, ``accuracy``, ``save_checkpoint``, ``AverageMeter`` and
    ``get_mean_size``; writes checkpoints and ``training.log`` under
    ``args.root + 'cache/' + exp_name``.
    """
    val_step = 25000
    val_steps = [
        5000,
    ]
    train_step = 500

    args = parser.parse_args()
    hostname = socket.gethostname()

    args.stepvalues = [int(val) for val in args.stepvalues.split(',')]

    exp_name = '{}-{}-{}-sl{:02d}-g{:d}-fs{:d}-{}-{:06d}'.format(
        args.dataset, args.arch, args.input, args.seq_len, args.gap,
        args.frame_step, args.batch_size, int(args.lr * 1000000))

    args.exp_name = exp_name
    args.root += args.dataset + '/'
    model_save_dir = args.root + 'cache/' + exp_name
    if not os.path.isdir(model_save_dir):
        os.system('mkdir -p ' + model_save_dir)

    args.model_save_dir = model_save_dir
    args.global_models_dir = os.path.expanduser(args.global_models_dir)

    if args.visdom:
        import visdom
        viz = visdom.Visdom()
        ports = {'mars': 8097, 'sun': 8096}
        viz.port = ports[hostname]
        viz.env = exp_name

        # initialize visdom loss plot
        loss_plot = viz.line(X=torch.zeros((1, )).cpu(),
                             Y=torch.zeros((1, 2)).cpu(),
                             opts=dict(xlabel='Iteration',
                                       ylabel='Losses',
                                       title='Train & Val Losses',
                                       legend=['Train-Loss', 'Val-Loss']))

        eval_plot = viz.line(
            X=torch.zeros((1, )).cpu(),
            Y=torch.zeros((1, 4)).cpu(),
            opts=dict(xlabel='Iteration',
                      ylabel='Accuracy',
                      title='Train & Val Accuracies',
                      legend=['trainTop3', 'valTop3', 'trainTop1', 'valTop1']))

    ## load dataloading configs
    input_size, means, stds = get_mean_size(args.arch)
    normalize = transforms.Normalize(mean=means, std=stds)

    # Data loading transform based on model type
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    val_transform = transforms.Compose([
        transforms.Scale(int(input_size * 1.1)),
        transforms.CenterCrop(int(input_size)),
        transforms.ToTensor(),
        normalize,
    ])

    if args.arch.find('vgg') > -1:
        transform = BaseTransform(size=input_size, mean=means)
        val_transform = transform
        print('\n\ntransforms are going to be VGG type\n\n')

    train_dataset = KINETICS(args.root,
                             args.input,
                             transform,
                             netname=args.arch,
                             subsets=['train'],
                             scale_size=int(input_size * 1.1),
                             input_size=int(input_size),
                             exp_name=exp_name,
                             frame_step=args.frame_step,
                             seq_len=args.seq_len,
                             gap=args.gap)

    args.num_classes = train_dataset.num_classes
    print('Models will be cached in ', args.model_save_dir)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    # Validation uses a sparser frame step (x6) to keep evaluation fast.
    val_dataset = KINETICS(args.root,
                           args.input,
                           val_transform,
                           netname=args.arch,
                           subsets=['val'],
                           exp_name=exp_name,
                           scale_size=int(input_size * 1.1),
                           input_size=int(input_size),
                           frame_step=args.frame_step * 6,
                           seq_len=args.seq_len,
                           gap=args.gap)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    model, criterion = initialise_model(args)

    # Biases get 2x learning rate and no weight decay (classic Caffe recipe).
    parameter_dict = dict(model.named_parameters())
    params = []
    for name, param in parameter_dict.items():
        if name.find('bias') > -1:
            params += [{
                'params': [param],
                'lr': args.lr * 2,
                'weight_decay': 0
            }]
        else:
            params += [{
                'params': [param],
                'lr': args.lr,
                'weight_decay': args.weight_decay
            }]

    optimizer = torch.optim.SGD(params, args.lr, momentum=args.momentum)

    if args.resume:
        latest_file_name = '{:s}/latest.pth'.format(args.model_save_dir)
        latest_dict = torch.load(latest_file_name)
        args.start_iteration = latest_dict['iteration'] + 1
        model.load_state_dict(torch.load(latest_dict['model_file_name']))
        optimizer.load_state_dict(
            torch.load(latest_dict['optimizer_file_name']))
        log_fid = open(args.model_save_dir + '/training.log', 'a')

    else:
        log_fid = open(args.model_save_dir + '/training.log', 'w')

    log_fid.write(args.exp_name + '\n')
    for arg in vars(args):
        print(arg, getattr(args, arg))
        log_fid.write(str(arg) + ': ' + str(getattr(args, arg)) + '\n')
    log_fid.write(str(model))
    best_top1 = 0.0
    val_loss = 0.0
    val_top1 = 0.0
    val_top3 = 0.0
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top3 = AverageMeter()
    iteration = args.start_iteration
    approx_epochs = np.ceil(
        float(args.max_iterations - iteration) / len(train_loader))

    print(
        'Approx Epochs to RUN: {}, Start Ietration {} Max iterations {} # of samples in dataset {}'
        .format(approx_epochs, iteration, args.max_iterations,
                len(train_loader)))
    epoch = -1
    scheduler = MultiStepLR(optimizer,
                            milestones=args.stepvalues,
                            gamma=args.gamma)
    model.train()
    torch.cuda.synchronize()
    start = time.perf_counter()
    while iteration < args.max_iterations:
        epoch += 1
        for i, (batch, targets, __, __) in enumerate(train_loader):
            # Skip the last couple of (possibly short) batches of each epoch.
            if i < len(train_loader) - 2:
                if iteration > args.max_iterations:
                    break
                iteration += 1
                #pdb.set_trace()
                #print('input size ',batch.size())
                # BUGFIX: `async` is a reserved word since Python 3.7; the
                # keyword was renamed to `non_blocking` in torch 0.4.
                targets = targets.cuda(non_blocking=True)
                input_var = torch.autograd.Variable(
                    batch.cuda(non_blocking=True))
                target_var = torch.autograd.Variable(targets)

                torch.cuda.synchronize()
                data_time.update(time.perf_counter() - start)

                # compute output
                output = model(input_var)
                loss = criterion(output, target_var)
                #pdb.set_trace()
                # measure accuracy and record loss
                prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
                # BUGFIX: loss.data[0] raises on 0-dim tensors (torch>=0.5);
                # use .item() as the rest of this file already does.
                losses.update(loss.item(), batch.size(0))
                top1.update(prec1[0], batch.size(0))
                top3.update(prec3[0], batch.size(0))

                # compute gradient and do SGD step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                scheduler.step()
                # measure elapsed time
                torch.cuda.synchronize()
                batch_time.update(time.perf_counter() - start)
                start = time.perf_counter()
                if iteration % args.print_freq == 0:
                    line = 'Epoch: [{0}][{1}/{2}] Time {batch_time.val:.3f} ({batch_time.avg:.3f}) Data {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                        epoch,
                        iteration,
                        len(train_loader),
                        batch_time=batch_time,
                        data_time=data_time)
                    line += 'Loss {loss.val:.4f} ({loss.avg:.4f}) Prec@1 {top1.val:.3f} ({top1.avg:.3f}) Prec@3 {top3.val:.3f} ({top3.avg:.3f})'.format(
                        loss=losses, top1=top1, top3=top3)
                    print(line)
                    log_fid.write(line + '\n')

                avgtop1 = top1.avg
                avgtop3 = top3.avg
                avgloss = losses.avg
                # Periodic validation + checkpointing.
                if (iteration % val_step == 0
                        or iteration in val_steps) and iteration > 0:
                    # evaluate on validation set
                    val_top1, val_top3, val_loss = validate(
                        args, val_loader, model, criterion)
                    line = '\n\nValidation @ {:d}: Top1 {:.2f} Top3 {:.2f} Loss {:.3f}\n\n'.format(
                        iteration, val_top1, val_top3, val_loss)
                    print(line)
                    log_fid.write(line)
                    # remember best prec@1 and save checkpoint
                    is_best = val_top1 > best_top1
                    best_top1 = max(val_top1, best_top1)
                    torch.cuda.synchronize()
                    line = '\nBest Top1 sofar {:.3f} current top1 {:.3f} Time taken for Validation {:0.3f}\n\n'.format(
                        best_top1, val_top1,
                        time.perf_counter() - start)
                    log_fid.write(line + '\n')
                    print(line)
                    save_checkpoint(
                        {
                            'epoch': epoch,
                            'iteration': iteration,
                            'arch': args.arch,
                            'val_top1': val_top1,
                            'val_top3': val_top3,
                            'val_loss': val_loss,
                            'train_top1': avgtop1,
                            'train_top3': avgtop3,
                            'train_loss': avgloss,
                            'state_dict': model.state_dict(),
                            'optimizer': optimizer.state_dict(),
                        }, is_best, args.model_save_dir)
                    if args.visdom:
                        viz.line(X=torch.ones((1, 2)).cpu() * iteration,
                                 Y=torch.Tensor([avgloss,
                                                 val_loss]).unsqueeze(0).cpu(),
                                 win=loss_plot,
                                 update='append')
                        viz.line(X=torch.ones((1, 4)).cpu() * iteration,
                                 Y=torch.Tensor(
                                     [avgtop3, val_top3, avgtop1,
                                      val_top1]).unsqueeze(0).cpu(),
                                 win=eval_plot,
                                 update='append')

                    model.train()  # validate() leaves the model in eval mode

                # Periodically plot and reset the running training meters.
                if iteration % train_step == 0 and iteration > 0:
                    if args.visdom:
                        viz.line(X=torch.ones((1, 2)).cpu() * iteration,
                                 Y=torch.Tensor([avgloss,
                                                 val_loss]).unsqueeze(0).cpu(),
                                 win=loss_plot,
                                 update='append')
                        viz.line(X=torch.ones((1, 4)).cpu() * iteration,
                                 Y=torch.Tensor(
                                     [avgtop3, val_top3, avgtop1,
                                      val_top1]).unsqueeze(0).cpu(),
                                 win=eval_plot,
                                 update='append')
                    top1.reset()
                    top3.reset()
                    losses.reset()
                    print('RESET::=> ', args.exp_name)
示例#15
0
def train():
    """Run the epoch-driven SSD training loop using module-level ``net``,
    ``train_dataset``, ``optimizer``, ``criterion``, ``priors``, ``args`` etc.

    Saves periodic checkpoints, optionally evaluates on ``testset`` every
    ``args.test_frequency`` epochs, logs to ``log_file_path`` and (optionally)
    plots losses to visdom.
    """
    net.train()
    # loss counters
    epoch = 0
    if args.resume_net:
        epoch = 0 + args.resume_epoch  # continue epoch count from the resume point
    epoch_size = len(train_dataset) // args.batch_size # How many batch size is needed.
    max_iter = args.max_epoch * epoch_size
    print('max_iter : ', max_iter)

    # LR-decay milestones (in iterations) differ per dataset.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', train_dataset.name)
    step_index = 0

    ## To visualize
    if args.visdom:
        # initialize visdom loss plot
        lot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 3)).cpu(),
            opts=dict(
                xlabel='Iteration',
                ylabel='Loss',
                title='Current SSD Training Loss',
                legend=['Loc Loss', 'Conf Loss', 'Loss']
            )
        )
        epoch_lot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 3)).cpu(),
            opts=dict(
                xlabel='Epoch',
                ylabel='Loss',
                title='Epoch SSD Training Loss',
                legend=['Loc Loss', 'Conf Loss', 'Loss']
            )
        )
    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    log_file = open(log_file_path, 'w')
    batch_iterator = None
    mean_loss_c = 0  # running conf-loss sum, reset every 10 iterations below
    mean_loss_l = 0  # running loc-loss sum, reset every 10 iterations below
    for iteration in range(start_iter, max_iter + 10):
        # -------------------------------------------------------------------------------------------------------------------- #
        if iteration % epoch_size == 0:
            # create batch iterator (a fresh shuffled pass each epoch)
            batch_iterator = iter(data.DataLoader(train_dataset, batch_size, shuffle=True, num_workers=args.num_workers, collate_fn=detection_collate))
            # loc_loss = 0
            # conf_loss = 0

            # Checkpoint every save_frequency epochs (skipping epoch 0).
            if epoch % args.save_frequency == 0 and epoch > 0:
                torch.save(net.state_dict(), os.path.join(save_folder, args.version + '_' + args.dataset + '_epoches_' + repr(epoch) + '.pth'))
            ## Evaluation
            if epoch % args.test_frequency == 0 and epoch > 0:
                net.eval()
                top_k = (300, 200)[args.dataset == 'COCO']
                if args.dataset == 'VOC':
                    # net.module.size -> net.size.
                    APs, mAP = test_net(test_save_dir, net, detector, args.cuda, testset, BaseTransform(net.module.size, rgb_means, rgb_std, (2, 0, 1)), top_k, thresh=0.01)
                    APs = [str(num) for num in APs]
                    mAP = str(mAP)
                    log_file.write(str(iteration) + ' APs:\n' + '\n'.join(APs))
                    log_file.write('\n mAP:\n' + mAP + '\n')

                    # -------------------------------------------------------------------------------------------------------------------- #
                    # 1. Log scalar values (scalar summary)
                    # info = {'accuracy': mAP}
                    #
                    # for tag, value in info.items():
                    #     logger.scalar_summary(tag, value, iteration + 1)
                    # -------------------------------------------------------------------------------------------------------------------- #

                else:
                    # COCO path: run detection without AP aggregation here.
                    test_net(test_save_dir, net, detector, args.cuda, testset, BaseTransform(net.module.size, rgb_means, rgb_std, (2, 0, 1)), top_k, thresh=0.01)
                net.train()

            epoch += 1
        # -------------------------------------------------------------------------------------------------------------------- #
        load_t0 = time.time()
        # Step the LR schedule when a milestone iteration is reached.
        if iteration in stepvalues:
            step_index = stepvalues.index(iteration) + 1
            if args.visdom:
                viz.line(
                    X=torch.ones((1, 3)).cpu() * epoch,
                    Y=torch.Tensor([mean_loss_l, mean_loss_c, mean_loss_l + mean_loss_c]).unsqueeze(0).cpu() / epoch_size,
                    win=epoch_lot,
                    update='append'
                )
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index, iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)

        if args.cuda:
            images = images.cuda()
            targets = [ann.cuda() for ann in targets]
        else:
            images = images
            targets = [ann for ann in targets]

        # forward
        out = net(images)
        # backprop
        optimizer.zero_grad()
        # arm branch loss
        loss_l, loss_c = criterion(out, priors, targets)
        # odm branch loss

        mean_loss_c += loss_c.item()
        mean_loss_l += loss_l.item()

        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        # -------------------------------------------------------------------------------------------------------------------- #
        if iteration % epoch_size == 0:
            # 1. Log scalar values (scalar summary)
            info = {'loss': loss.item(), 'loc_loss': loss_l.item(), 'conf_loss': loss_c.item()}

            for tag, value in info.items():
                logger.scalar_summary(tag, value, iteration + 1)
        # -------------------------------------------------------------------------------------------------------------------- #

        # -------------------------------------------------------------------------------------------------------------------- #
        # Console/file progress report every 10 iterations; the /10 turns the
        # running sums into per-iteration means before they are reset.
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                  + '|| Totel iter ' + repr(iteration) + ' || L: %.4f C: %.4f||' % (mean_loss_l / 10, mean_loss_c / 10) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr))

            log_file.write('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
                + '|| Totel iter ' +repr(iteration) + ' || L: %.4f C: %.4f||' % (
                    mean_loss_l / 10, mean_loss_c / 10) +'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr) + '\n')

            mean_loss_c = 0
            mean_loss_l = 0
            if args.visdom and args.send_images_to_visdom:
                random_batch_index = np.random.randint(images.size(0))
                viz.image(images.data[random_batch_index].cpu().numpy())
        # -------------------------------------------------------------------------------------------------------------------- #
    log_file.close()
    torch.save(net.state_dict(), os.path.join(save_folder,'Final_' + args.version + '_' + args.dataset + '.pth'))
示例#16
0
            self.ptr = 0
            self.size = size
            self.ids = [i for i in range(lst.size(0))]
            if shuffle:
                random.shuffle(self.ids)
            if drop_last:
                self.ids = self.ids[:int(lst.size(0) / size) * size]

        def __next__(self):
            """Return the next batch of boxes as a detached copy.

            Advances the pointer by one batch and raises ``StopIteration``
            once fewer than ``self.size`` items remain past ``self.ptr``.
            """
            self.ptr += self.size
            if self.ptr > len(self.lst):
                raise StopIteration()
            # Index with the (possibly shuffled) id window, then clone so the
            # caller cannot mutate the cached tensor.
            return self.lst[self.ids[self.ptr -
                                     self.size:self.ptr]].clone().detach()

    def __iter__(self):
        """Create a fresh batch iterator over the cached bounding-box data,
        honouring the loader's shuffle and drop_last settings."""
        return self.IterInstance(self.bb_data, self.b_size, self.shuffle,
                                 self.drop_last)


if __name__ == '__main__':
    # COCO-2017 train split with SSD300 mean-subtraction preprocessing;
    # difficult annotations are dropped.
    dataset = COCODetection(
        root=COCO_ROOT,
        image_sets=(('2017', 'train'), ),
        transform=BaseTransform(300, (104, 117, 123)),
        target_transform=COCOAnnotationTransform(keep_difficult=False))
    # Pre-compute ground-truth boxes for every class id and cache them.
    loader = BoundingBoxesLoader(dataset,
                                 list(range(len(COCO_CLASSES))),
                                 cache_pth='../truths/gts_coco_17train.pth')
示例#17
0
    for k, v in state_dict.items():
        name = k[7:]
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    # eval mode
    net.eval()
    print('Finished loading model!')
    # load data
    #testset = VOCDetection(args.voc_root, [('2007', 'test')], None, AnnotationTransform())

    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation
    # cv_idx_for_test must be equal to the checkpoint idx of the trained model (otherwise cheat)
    means = (34, 34, 34)
    trainset = FISHdetection(ct_train[cv_idx_for_test],
                             coord_ssd_train[cv_idx_for_test], None,
                             'lesion_train')
    validset = FISHdetection(ct_valid[cv_idx_for_test],
                             coord_ssd_valid[cv_idx_for_test], None,
                             'lesion_valid')

    # allset = FISHdetection(np.vstack(ct), np.vstack(coord).astype(np.float64), None, 'lesion_all')

    test_net(args.save_folder,
             net,
             args.cuda,
             validset,
             BaseTransform(size, means),
             thresh=args.visual_threshold)
示例#18
0
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:] # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    net.eval()
    print('Finished loading model!')
    # load data
    print(args.picture_path)
    testset = VOCDetection(
        args.picture_path, [('2007', 'test')], None, AnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    else:
        net = net.cpu()
    # evaluation
    #top_k = (300, 200)[args.dataset == 'COCO']
    top_k = 200
    detector = Detect(num_classes,0,cfg)
    save_folder = os.path.join(args.save_folder,args.dataset)
    rgb_means = (103.94,116.78,123.68)
    test_net(save_folder, net, detector, args.cuda, testset, args.device_ip, args.start_time, args.num_th, args.bbox_params, args.save_path,
             BaseTransform(net.size, rgb_means, (2, 0, 1)),
             top_k, thresh=0.01)
def train():
    """Iteration-based training loop for a RefineDet-style SSD (ARM + ODM).

    Reads module-level globals: `net`, `args`, `train_dataset`,
    `batch_size`, `optimizer`, `priors`, `arm_criterion`, `odm_criterion`,
    `detector`, `testset`, `viz`, `rgb_means`, `rgb_std`, `save_folder`,
    `test_save_dir`, `log_file_path`, `adjust_learning_rate`.
    NOTE(review): written against pre-0.4 PyTorch (`Variable`,
    `volatile=True`, `loss.data[0]`); confirm the runtime version.
    """
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0
    epoch = 0
    if args.resume_net:
        epoch = 0 + args.resume_epoch
    epoch_size = len(train_dataset) // args.batch_size
    max_iter = args.max_epoch * epoch_size

    # LR-drop milestones expressed in iterations (epoch boundaries);
    # schedule chosen by dataset via the tuple-indexing trick.
    stepvalues_VOC = (150 * epoch_size, 200 * epoch_size, 250 * epoch_size)
    stepvalues_COCO = (90 * epoch_size, 120 * epoch_size, 140 * epoch_size)
    stepvalues = (stepvalues_VOC, stepvalues_COCO)[args.dataset == 'COCO']
    print('Training', args.version, 'on', train_dataset.name)
    step_index = 0

    if args.visdom:
        # initialize visdom loss plot
        lot = viz.line(X=torch.zeros((1, )).cpu(),
                       Y=torch.zeros((1, 3)).cpu(),
                       opts=dict(xlabel='Iteration',
                                 ylabel='Loss',
                                 title='Current SSD Training Loss',
                                 legend=['Loc Loss', 'Conf Loss', 'Loss']))
        epoch_lot = viz.line(X=torch.zeros((1, )).cpu(),
                             Y=torch.zeros((1, 3)).cpu(),
                             opts=dict(
                                 xlabel='Epoch',
                                 ylabel='Loss',
                                 title='Epoch SSD Training Loss',
                                 legend=['Loc Loss', 'Conf Loss', 'Loss']))
    if args.resume_epoch > 0:
        start_iter = args.resume_epoch * epoch_size
    else:
        start_iter = 0

    #log_file = open(log_file_path,'w')
    batch_iterator = None
    # Running sums of the four loss terms, reset every 10 iterations
    # (the print below divides each by 10).
    mean_odm_loss_c = 0
    mean_odm_loss_l = 0
    mean_arm_loss_c = 0
    mean_arm_loss_l = 0
    # `+ 10` runs a few iterations past max_iter, presumably so the final
    # epoch boundary (save/test) is reached — TODO confirm intent.
    for iteration in range(start_iter, max_iter + 10):
        if (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(
                data.DataLoader(train_dataset,
                                batch_size,
                                shuffle=True,
                                num_workers=args.num_workers,
                                collate_fn=detection_collate))
            loc_loss = 0
            conf_loss = 0
            # Periodic checkpoint at epoch boundaries (skips epoch 0).
            if epoch % args.save_frequency == 0 and epoch > 0:
                torch.save(
                    net.state_dict(),
                    os.path.join(
                        save_folder, args.version + '_' + args.dataset +
                        '_epoches_' + repr(epoch) + '.pth'))
            # Periodic evaluation; only VOC returns per-class APs to log.
            if epoch % args.test_frequency == 0 and epoch > 0:
                net.eval()
                top_k = (300, 200)[args.dataset == 'COCO']
                if args.dataset == 'VOC':
                    APs, mAP = test_net(test_save_dir,
                                        net,
                                        detector,
                                        args.cuda,
                                        testset,
                                        BaseTransform(net.module.size,
                                                      rgb_means, rgb_std,
                                                      (2, 0, 1)),
                                        top_k,
                                        thresh=0.01)
                    APs = [str(num) for num in APs]
                    mAP = str(mAP)
                    # NOTE(review): mode 'w+' truncates, so each test
                    # overwrites the previous epoch's AP log.
                    with open(log_file_path, 'w+') as log_file:
                        log_file.write(
                            str(iteration) + ' APs:\n' + '\n'.join(APs))
                        log_file.write('\nmAP:\n' + mAP + '\n')
                else:
                    test_net(test_save_dir,
                             net,
                             detector,
                             args.cuda,
                             testset,
                             BaseTransform(net.module.size, rgb_means, rgb_std,
                                           (2, 0, 1)),
                             top_k,
                             thresh=0.01)

                net.train()
            epoch += 1

        load_t0 = time.time()
        # Drop the learning rate when a milestone iteration is hit, and
        # log the per-epoch averaged losses to visdom.
        if iteration in stepvalues:
            step_index = stepvalues.index(iteration) + 1
            if args.visdom:
                viz.line(
                    X=torch.ones((1, 3)).cpu() * epoch,
                    Y=torch.Tensor([loc_loss, conf_loss, loc_loss + conf_loss
                                    ]).unsqueeze(0).cpu() / epoch_size,
                    win=epoch_lot,
                    update='append')
        lr = adjust_learning_rate(optimizer, args.gamma, epoch, step_index,
                                  iteration, epoch_size)

        # load train data
        images, targets = next(batch_iterator)

        #print(np.sum([torch.sum(anno[:,-1] == 2) for anno in targets]))

        # volatile=True is the pre-0.4 way to exclude the ground-truth
        # targets from autograd history.
        if args.cuda:
            images = Variable(images.cuda())
            targets = [
                Variable(anno.cuda(), volatile=True) for anno in targets
            ]
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]
        # forward
        out = net(images)
        arm_loc, arm_conf, odm_loc, odm_conf = out
        # backprop
        optimizer.zero_grad()
        #arm branch loss
        arm_loss_l, arm_loss_c = arm_criterion((arm_loc, arm_conf), priors,
                                               targets)
        #odm branch loss: refines ARM outputs, hence they are passed in.
        odm_loss_l, odm_loss_c = odm_criterion(
            (odm_loc, odm_conf), priors, targets, (arm_loc, arm_conf), False)

        mean_arm_loss_c += arm_loss_c.data[0]
        mean_arm_loss_l += arm_loss_l.data[0]
        mean_odm_loss_c += odm_loss_c.data[0]
        mean_odm_loss_l += odm_loss_l.data[0]

        # Total loss is the unweighted sum of all four terms.
        loss = arm_loss_l + arm_loss_c + odm_loss_l + odm_loss_c
        loss.backward()
        optimizer.step()
        load_t1 = time.time()
        if iteration % 10 == 0:
            print('Epoch:' + repr(epoch) + ' || epochiter: ' +
                  repr(iteration % epoch_size) + '/' + repr(epoch_size) +
                  '|| Total iter ' + repr(iteration) +
                  ' || AL: %.4f AC: %.4f OL: %.4f OC: %.4f||' %
                  (mean_arm_loss_l / 10, mean_arm_loss_c / 10,
                   mean_odm_loss_l / 10, mean_odm_loss_c / 10) +
                  'Batch time: %.4f sec. ||' % (load_t1 - load_t0) +
                  'LR: %.8f' % (lr))
            # log_file.write('Epoch:' + repr(epoch) + ' || epochiter: ' + repr(iteration % epoch_size) + '/' + repr(epoch_size)
            #       + '|| Total iter ' +
            #        repr(iteration) + ' || AL: %.4f AC: %.4f OL: %.4f OC: %.4f||' % (
            #        mean_arm_loss_l / 10,mean_arm_loss_c / 10,mean_odm_loss_l / 10, mean_odm_loss_c / 10) +
            #       'Batch time: %.4f sec. ||' % (load_t1 - load_t0) + 'LR: %.8f' % (lr)+'\n')

            mean_odm_loss_c = 0
            mean_odm_loss_l = 0
            mean_arm_loss_c = 0
            mean_arm_loss_l = 0
            if args.visdom and args.send_images_to_visdom:
                random_batch_index = np.random.randint(images.size(0))
                viz.image(images.data[random_batch_index].cpu().numpy())
    # NOTE(review): the open() above is commented out, so `log_file` here
    # is either unbound (UnboundLocalError if the VOC test never ran) or
    # the already-closed file from the `with` block — likely dead code.
    log_file.close()
    torch.save(
        net.state_dict(),
        os.path.join(save_folder,
                     'Final_' + args.version + '_' + args.dataset + '.pth'))
示例#20
0
        # 'weights/ssd300_0712_150000.pth', 'weights/ssd300_0712_160000.pth'
        # 'weights/ssd512_tme_255000_cut_90.4.pth'
        'weights/ssd512_tme_255000.pth'
    ]

    # Evaluate each checkpoint in check_point_list in turn on the same
    # test split, logging results per model.
    for model_name in check_point_list:
        # load net
        num_classes = len(TME_CLASSES) + 1  # +1 background
        net = build_ssd('test', args.dim, num_classes)  # initialize SSD
        # net.load_state_dict(torch.load(args.trained_model))
        net.load_state_dict(torch.load(model_name))
        net.eval()
        log.l.info(model_name)
        log.l.info('Finished loading model!')
        # load data
        # NOTE(review): [(set_type)] is just [set_type] — the parentheses
        # do not make a tuple.
        dataset = TMEDetection(DataRoot, [(set_type)],
                               BaseTransform(args.dim, dataset_mean),
                               AnnotationTransform())
        if args.cuda:
            net = net.cuda()
            cudnn.benchmark = True
        # evaluation
        test_net(args.save_folder,
                 net,
                 args.cuda,
                 dataset,
                 BaseTransform(net.size, dataset_mean),
                 args.top_k,
                 args.dim,
                 thresh=args.confidence_threshold)
示例#21
0
# Map dataset name to the folder holding demo images.
imgs_path_dict = {'VOC': 'imgs/VOC', 'COCO': 'imgs/COCO'}
im_path = imgs_path_dict[args.dataset]

# Annotated detection results are written under <im_path>/im_res.
imgs_result_path = os.path.join(im_path, 'im_res')
if not os.path.exists(imgs_result_path):
    os.makedirs(imgs_result_path)

with torch.no_grad():
    # Precompute the anchor (prior) boxes once; inference needs no grads.
    priors = priorbox.forward()
    if cfg.test_cfg.cuda:
        net = net.cuda()
        priors = priors.cuda()
        cudnn.benchmark = True
    else:
        net = net.cpu()
# (2, 0, 1) permutes the image HWC -> CHW after resize/mean subtraction.
_preprocess = BaseTransform(cfg.model.input_size, cfg.model.rgb_means,
                            (2, 0, 1))
detector = Detect(num_classes, cfg.loss.bkg_label, anchor_config)


def _to_color(indx, base):
    """ return (b, r, g) tuple"""
    base2 = base * base
    b = 2 - indx / base2
    r = 2 - (indx % base2) / base
    g = 2 - (indx % base2) % base
    return b * 127, r * 127, g * 127


base = int(np.ceil(pow(num_classes, 1. / 3)))
colors = [_to_color(x, base) for x in range(num_classes)]
cats = [
def main():
    """Entry point: build, restore, and train a recurrent SSD on UCF24.

    Parses CLI arguments, constructs the refine-SSD inside DataParallel,
    loads initial weights (finetune snapshot / resume checkpoint / optical
    flow weights / VGG base), freezes the early VGG layers, then runs the
    epoch loop: per-GPU data loaders, training, checkpointing, and
    periodic validation with best-mAP tracking.
    """
    global my_dict, keys, k_len, arr, xxx, args, log_file, best_prec1

    # Give best_prec1 a defined starting value; a resume checkpoint may
    # overwrite it below. Without this, the first save_checkpoint() on a
    # fresh (non-resume) run raised NameError.
    best_prec1 = 0

    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Training')
    parser.add_argument('--version',
                        default='v2',
                        help='conv11_2(v2) or pool6(v1) as last layer')
    parser.add_argument('--basenet',
                        default='vgg16_reducedfc.pth',
                        help='pretrained base model')
    parser.add_argument('--dataset',
                        default='ucf24',
                        help='pretrained base model')
    parser.add_argument('--ssd_dim',
                        default=300,
                        type=int,
                        help='Input Size for SSD')  # only support 300 now
    parser.add_argument(
        '--modality',
        default='rgb',
        type=str,
        help='INput tyep default rgb options are [rgb,brox,fastOF]')
    parser.add_argument('--jaccard_threshold',
                        default=0.5,
                        type=float,
                        help='Min Jaccard index for matching')
    parser.add_argument('--batch_size',
                        default=40,
                        type=int,
                        help='Batch size for training')
    parser.add_argument('--num_workers',
                        default=0,
                        type=int,
                        help='Number of workers used in dataloading')
    parser.add_argument('--max_iter',
                        default=120000,
                        type=int,
                        help='Number of training iterations')
    parser.add_argument('--man_seed',
                        default=123,
                        type=int,
                        help='manualseed for reproduction')
    parser.add_argument('--cuda',
                        default=True,
                        type=str2bool,
                        help='Use cuda to train model')
    parser.add_argument('--ngpu',
                        default=1,
                        type=str2bool,
                        help='Use cuda to train model')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.0005,
                        type=float,
                        help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--stepvalues',
                        default='70000,90000',
                        type=str,
                        help='iter number when learning rate to be dropped')
    parser.add_argument('--weight_decay',
                        default=5e-4,
                        type=float,
                        help='Weight decay for SGD')
    parser.add_argument('--gamma',
                        default=0.2,
                        type=float,
                        help='Gamma update for SGD')
    parser.add_argument('--log_iters',
                        default=True,
                        type=bool,
                        help='Print the loss at each iteration')
    parser.add_argument('--visdom',
                        default=False,
                        type=str2bool,
                        help='Use visdom to for loss visualization')
    parser.add_argument('--data_root',
                        default=relative_path + 'realtime/',
                        help='Location of VOC root directory')
    parser.add_argument('--save_root',
                        default=relative_path + 'realtime/saveucf24/',
                        help='Location to save checkpoint models')

    parser.add_argument('--iou_thresh',
                        default=0.5,
                        type=float,
                        help='Evaluation threshold')
    parser.add_argument('--conf_thresh',
                        default=0.01,
                        type=float,
                        help='Confidence threshold for evaluation')
    parser.add_argument('--nms_thresh',
                        default=0.45,
                        type=float,
                        help='NMS threshold')
    parser.add_argument('--topk',
                        default=50,
                        type=int,
                        help='topk for evaluation')
    parser.add_argument('--clip_gradient',
                        default=40,
                        type=float,
                        help='gradients clip')
    parser.add_argument('--resume',
                        default=None,
                        type=str,
                        help='Resume from checkpoint')
    parser.add_argument('--start_epoch',
                        default=0,
                        type=int,
                        help='start epoch')
    parser.add_argument('--epochs',
                        default=35,
                        type=int,
                        metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--eval_freq',
                        default=2,
                        type=int,
                        metavar='N',
                        help='evaluation frequency (default: 5)')
    parser.add_argument('--snapshot_pref',
                        type=str,
                        default="ucf101_vgg16_ssd300_")
    parser.add_argument('--lr_milestones',
                        default=[-2, -5],
                        type=float,
                        help='initial learning rate')
    parser.add_argument('--arch', type=str, default="VGG16")
    # Fix: was type=str, so any CLI value (even 'False') arrived as a
    # truthy string and the identity checks below could never match.
    # str2bool keeps the default behavior but makes the flag usable.
    parser.add_argument('--Finetune_SSD', default=False, type=str2bool)
    parser.add_argument('-e',
                        '--evaluate',
                        dest='evaluate',
                        action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('--gpus', nargs='+', type=int, default=[0, 1, 2, 3])

    print(__file__)
    file_name = (__file__).split('/')[-1]
    file_name = file_name.split('.')[0]
    print(file_name)
    ## Parse arguments
    args = parser.parse_args()
    ## set random seeds
    np.random.seed(args.man_seed)
    torch.manual_seed(args.man_seed)
    if args.cuda:
        torch.cuda.manual_seed_all(args.man_seed)

    if args.cuda and torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    args.cfg = v2
    args.train_sets = 'train'
    args.means = (104, 117, 123)
    num_classes = len(CLASSES) + 1  # +1 for the background class
    args.num_classes = num_classes
    args.stepvalues = [int(val) for val in args.stepvalues.split(',')]
    args.loss_reset_step = 30
    args.eval_step = 10000
    args.print_step = 10
    args.data_root += args.dataset + '/'

    ## Define the experiment Name will used to same directory
    day = (time.strftime('%m-%d', time.localtime(time.time())))
    args.snapshot_pref = ('ucf101_CONV-SSD-{}-{}-bs-{}-{}-lr-{:05d}').format(
        args.dataset, args.modality, args.batch_size, args.basenet[:-14],
        int(args.lr * 100000)) + '_' + file_name + '_' + day
    print(args.snapshot_pref)

    if not os.path.isdir(args.save_root):
        os.makedirs(args.save_root)

    net = build_refine_ssd(300, args.num_classes)
    net = torch.nn.DataParallel(net, device_ids=args.gpus)

    if args.Finetune_SSD:
        print("load snapshot")
        pretrained_weights = "/data4/lilin/my_code/realtime/ucf24/rgb-ssd300_ucf24_120000.pth"
        pretrained_dict = torch.load(pretrained_weights)
        model_dict = net.state_dict()  # 1. filter out unnecessary keys
        pretrained_dict_2 = {
            k: v
            for k, v in pretrained_dict.items() if k in model_dict
        }  # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict_2)  # 3. load the new state dict
        # Fix: the merged dict was built but never loaded into the net.
        net.load_state_dict(model_dict)
    elif args.resume:
        if os.path.isfile(args.resume):
            print(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            net.load_state_dict(checkpoint['state_dict'])
            # Fix: report the checkpoint path (was args.evaluate, a bool).
            print(("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch'])))
        else:
            print(("=> no checkpoint found at '{}'".format(args.resume)))

    elif args.modality == 'fastOF':
        print(
            'Download pretrained brox flow trained model weights and place them at:::=> ',
            args.data_root + 'ucf24/train_data/brox_wieghts.pth')
        pretrained_weights = args.data_root + 'train_data/brox_wieghts.pth'
        print('Loading base network...')
        net.load_state_dict(torch.load(pretrained_weights))
    else:
        vgg_weights = torch.load(args.data_root + 'train_data/' + args.basenet)
        print('Loading base network...')
        net.module.vgg.load_state_dict(vgg_weights)

    if args.cuda:
        net = net.cuda()

    # initialize newly added layers' weights with xavier method
    # (only when starting from the VGG base / flow weights)
    if not args.Finetune_SSD and args.resume is None:
        print('Initializing weights for extra layers and HEADs...')
        net.module.clstm_1.apply(weights_init)
        net.module.clstm_2.apply(weights_init)
        net.module.extras_r.apply(weights_init)
        net.module.loc_r.apply(weights_init)
        net.module.conf_r.apply(weights_init)

        net.module.extras.apply(weights_init)
        net.module.loc.apply(weights_init)
        net.module.conf.apply(weights_init)

    parameter_dict = dict(net.named_parameters(
    ))  # network parameters keyed by name
    params = []

    # Freeze the first VGG conv layers; elsewhere biases get a doubled LR
    # and no weight decay (standard SSD practice), weights the base LR.
    for name, param in parameter_dict.items():
        if name.find('vgg') > -1 and int(
                name.split('.')[2]) < 23:  # :and name.find('cell') <= -1
            param.requires_grad = False
            print(name, 'layer parameters will be fixed')
        else:
            if name.find('bias') > -1:
                print(
                    name, 'layer parameters will be trained @ {}'.format(
                        args.lr * 2))
                params += [{
                    'params': [param],
                    'lr': args.lr * 2,
                    'weight_decay': 0
                }]
            else:
                print(name,
                      'layer parameters will be trained @ {}'.format(args.lr))
                params += [{
                    'params': [param],
                    'lr': args.lr,
                    'weight_decay': args.weight_decay
                }]

    optimizer = optim.SGD(params,
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    criterion = RecurrentMultiBoxLoss(args.num_classes, 0.5, True, 0, True, 3,
                                      0.5, False, args.cuda)
    scheduler = None
    # scheduler = LogLR(optimizer, lr_milestones=args.lr_milestones, total_epoch=args.epochs)
    scheduler = MultiStepLR(optimizer,
                            milestones=args.stepvalues,
                            gamma=args.gamma)
    print('Loading Dataset...')
    num_gpu = len(args.gpus)

    rootpath = args.data_root
    imgtype = args.modality
    imagesDir = rootpath + imgtype + '/'
    split = 1
    splitfile = rootpath + 'splitfiles/trainlist{:02d}.txt'.format(split)
    trainvideos = readsplitfile(splitfile)

    splitfile = rootpath + 'splitfiles/testlist{:02d}.txt'.format(split)
    testvideos = readsplitfile(splitfile)

    ####### val dataset does not need shuffle #######
    # One val loader per GPU, each over an equal slice of the test videos.
    val_data_loader = []
    len_test = len(testvideos)
    random.shuffle(testvideos)
    for i in range(num_gpu):
        testvideos_temp = testvideos[int(i * len_test /
                                         num_gpu):int((i + 1) * len_test /
                                                      num_gpu)]
        val_dataset = UCF24Detection(args.data_root,
                                     'test',
                                     BaseTransform(args.ssd_dim, args.means),
                                     AnnotationTransform(),
                                     input_type=args.modality,
                                     full_test=False,
                                     videos=testvideos_temp,
                                     istrain=False)
        val_data_loader.append(
            data.DataLoader(val_dataset,
                            args.batch_size,
                            num_workers=args.num_workers,
                            shuffle=False,
                            collate_fn=detection_collate,
                            pin_memory=True,
                            drop_last=True))

    # Line-buffered log file so progress is visible while training.
    log_file = open(
        args.save_root + args.snapshot_pref + "_training_" + day + ".log", "w",
        1)
    log_file.write(args.snapshot_pref + '\n')

    for arg in vars(args):
        print(arg, getattr(args, arg))
        log_file.write(str(arg) + ': ' + str(getattr(args, arg)) + '\n')

    log_file.write(str(net))

    torch.cuda.synchronize()
    len_train = len(trainvideos)

    for epoch in range(args.start_epoch, args.epochs):
        ####### shuffle train dataset #######
        # Rebuild the per-GPU train loaders each epoch on a fresh shuffle.
        random.shuffle(trainvideos)
        train_data_loader = []
        for i in range(num_gpu):
            trainvideos_temp = trainvideos[int(i * len_train /
                                               num_gpu):int((i + 1) *
                                                            len_train /
                                                            num_gpu)]
            train_dataset = UCF24Detection(args.data_root,
                                           'train',
                                           SSDAugmentation(
                                               args.ssd_dim, args.means),
                                           AnnotationTransform(),
                                           input_type=args.modality,
                                           videos=trainvideos_temp,
                                           istrain=True)
            train_data_loader.append(
                data.DataLoader(train_dataset,
                                args.batch_size,
                                num_workers=args.num_workers,
                                shuffle=False,
                                collate_fn=detection_collate,
                                pin_memory=True,
                                drop_last=True))

        print("Train epoch_size: ", len(train_data_loader))
        print('Train SSD on', train_dataset.name)

        ########## train ###########
        train(train_data_loader, net, criterion, optimizer, scheduler, epoch,
              num_gpu)

        print('Saving state, epoch:', epoch)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': net.state_dict(),
                'best_prec1': best_prec1,
            },
            epoch=epoch)

        #### log lr ###
        # scheduler.step()
        # evaluate on validation set
        if (
                epoch + 1
        ) % args.eval_freq == 0 or epoch == args.epochs - 1 or epoch == 0:  #
            torch.cuda.synchronize()
            tvs = time.perf_counter()
            mAP, ap_all, ap_strs = validate(args,
                                            net,
                                            val_data_loader,
                                            val_dataset,
                                            epoch,
                                            iou_thresh=args.iou_thresh,
                                            num_gpu=num_gpu)
            # remember best prec@1 and save checkpoint
            is_best = mAP > best_prec1
            best_prec1 = max(mAP, best_prec1)
            print('Saving state, epoch:', epoch)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': net.state_dict(),
                    'best_prec1': best_prec1,
                }, is_best, epoch)

            for ap_str in ap_strs:
                print(ap_str)
                log_file.write(ap_str + '\n')
            ptr_str = '\nMEANAP:::=>' + str(mAP) + '\n'
            print(ptr_str)
            log_file.write(ptr_str)

            torch.cuda.synchronize()
            t0 = time.perf_counter()
            prt_str = '\nValidation TIME::: {:0.3f}\n\n'.format(t0 - tvs)
            print(prt_str)
            # Fix: was log_file.write(ptr_str), which re-logged the mAP
            # line instead of the validation timing just printed.
            log_file.write(prt_str)

    log_file.close()
示例#23
0
    from data import BaseTransform, VOC_CLASSES as labelmap
    from ssd import build_ssd

    # Default new tensors to CUDA when a GPU exists and --cuda was given.
    if torch.cuda.is_available():
        if args.cuda:
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        if not args.cuda:
            print(
                "WARNING: It looks like you have a CUDA device, but aren't " +
                "using CUDA.\nRun with --cuda for optimal speed.")
            torch.set_default_tensor_type('torch.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    net = build_ssd('test', 300, 21)  # initialize SSD
    net.load_state_dict(torch.load(args.weights))
    # NOTE(review): means are scaled by /256.0 here, unlike the usual
    # 0-255 (104, 117, 123) — confirm this matches how the weights were
    # trained before trusting detections.
    transform = BaseTransform(net.size,
                              (104 / 256.0, 117 / 256.0, 123 / 256.0))

    # Run the live demo and measure throughput with the FPS helper.
    fps = FPS().start()
    cv2_demo(net.eval(), transform)
    # stop the timer and display FPS information
    fps.stop()

    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # cleanup
    cv2.destroyAllWindows()
    #stream.stop()
示例#24
0
if __name__ == '__main__':
    args = parse_arg()
    #if args.dataset == 'coco_person':
    #    gt_json=os.path.expanduser('~/data/datasets/COCO/annotations/instances_val2014.json')
    #    num_classes = 2
    #    config = coco_person
    #elif args.dataset == 'modanet':
    #    num_classes = 14 # plus background
    #    pass
    if args.dataset == 'COCO_PERSON':
        dataset_root = COCO_ROOT
        cfg = configs['coco_person_{}'.format(args.size)]
        test_dataset = COCOPersonDetection(root=dataset_root,
                                           image_set='val2014',
                                           transform=BaseTransform(
                                               cfg['min_dim'], MEANS))
        gt_json = os.path.expanduser(
            '~/data/datasets/COCO/annotations/instances_val2014.json')
        just_person = True
    elif args.dataset == 'MODANET':
        cfg = configs['modanet_{}'.format(args.size)]
        print(cfg)
        h5_root = os.path.expanduser(
            '~/data/datasets/modanet/modanet_{}_{}_{}_hdf5.hdf5')
        h5_root_train = h5_root.format(cfg['min_dim'] // 2, cfg['min_dim'],
                                       'train')
        h5_root_val = h5_root.format(cfg['min_dim'] // 2, cfg['min_dim'],
                                     'val')
        test_dataset = ModanetDetectionHDF5(h5_root_val,
                                            part='val',
                                            transform=BaseTransform(
示例#25
0
            cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2,
                        cv2.LINE_AA)
            j += 1
    return frame


net = build_ssd('test')  # We create an object that is our neural network ssd.
net.load_state_dict(
    torch.load('ssd300_mAP_77.43_v2.pth',
               map_location=lambda storage, loc: storage)
)  # We get the weights of the neural network from another one that is pretrained (ssd300_mAP_77.43_v2.pth).

# Creating the transformation
transform = BaseTransform(
    net.size, (104 / 256.0, 117 / 256.0, 123 / 256.0)
)  # We create an object of the BaseTransform class, a class that will do the required transformations so that the image can be the input of the neural network.

reader = imageio.get_reader("funny_dog.mp4")
fps = reader.get_meta_data()["fps"]

writer = imageio.get_writer("output.mp4", fps=fps)

for i, frame in enumerate(
        reader):  # We iterate on the frames of the output video:
    frame = detect(
        frame, net.eval(), transform
    )  # We call our detect function (defined above) to detect the object on the frame.
    writer.append_data(frame)  # We add the next frame in the output video.
    print(i)  # We print the number of the processed frame.
writer.close(
def test():
    """Build the requested YOLO variant, load its weights, and evaluate it.

    Reads configuration from the module-level ``args`` namespace:
    ``cuda``, ``dataset``, ``input_size``, ``version``, ``conf_thresh``,
    ``nms_thresh``, ``diou_nms``, ``trained_model``, ``visual_threshold``,
    ``debug``.
    """
    # get device
    if args.cuda:
        print('use cuda')
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # load net
    num_classes = 80  # COCO class count; all variants below are COCO models
    input_size = [args.input_size, args.input_size]

    if args.dataset == 'COCO':
        testset = COCODataset(data_dir=coco_root,
                              json_file='instances_val2017.json',
                              name='val2017',
                              img_size=input_size[0],
                              transform=BaseTransform(input_size),
                              debug=args.debug)
    elif args.dataset == 'VOC':
        testset = VOCDetection(VOC_ROOT, [('2007', 'test')],
                               BaseTransform(input_size))

    # build model
    # Every branch of the original if/elif chain passed identical constructor
    # arguments; only the model family and backbone string differed. A
    # dispatch table replaces ~140 duplicated lines.
    model_table = {
        # version                : (family, backbone)
        'yolo_v3_plus':           ('plus', 'd-53'),
        'yolo_v3_plus_large':     ('plus', 'csp-l'),
        'yolo_v3_plus_half':      ('plus', 'csp-h'),
        'yolo_v3_plus_medium':    ('plus', 'csp-m'),
        'yolo_v3_plus_small':     ('plus', 'csp-s'),
        'yolo_v3_slim':           ('slim', 'd-tiny'),
        'yolo_v3_slim_csp':       ('slim', 'csp-slim'),
        'yolo_v3_slim_csp2':      ('slim', 'csp-tiny'),
        'yolo_v3_spp':            ('spp', 'd-53'),
    }
    if args.version not in model_table:
        print('Unknown version !!!')
        exit()
    family, backbone = model_table[args.version]

    if family == 'plus':
        from models.yolo_v3_plus import YOLOv3Plus as net_cls
    elif family == 'slim':
        from models.yolo_v3_slim import YOLOv3Slim as net_cls
    else:  # 'spp'
        from models.yolo_v3_spp import YOLOv3SPP as net_cls

    anchor_size = config.MULTI_ANCHOR_SIZE_COCO  # same anchors for all variants
    yolo_net = net_cls(device,
                       input_size=input_size,
                       num_classes=num_classes,
                       conf_thresh=args.conf_thresh,
                       nms_thresh=args.nms_thresh,
                       anchor_size=anchor_size,
                       backbone=backbone,
                       diou_nms=args.diou_nms)
    # Preserve the original per-variant log lines (spp used dashes).
    if args.version == 'yolo_v3_spp':
        print('Let us test yolo-v3-spp on the COCO dataset ......')
    else:
        print('Let us test {} on the COCO dataset ......'.format(args.version))

    # BUG FIX: map_location was hard-coded to 'cuda', which raises on
    # CPU-only machines; map onto the device selected above instead.
    yolo_net.load_state_dict(
        torch.load(args.trained_model, map_location=device))
    yolo_net.to(device).eval()
    print('Finished loading model!')

    # evaluation
    test_net(yolo_net, device, testset, thresh=args.visual_threshold)
# ===== Example #27 =====
def autolabel(args):
    """Run an SSD300 detector over a class-per-directory image tree and
    write all detections to ``anno.txt``.

    args must provide:
        weights   -- path to a trained SSD300 state dict
        image_dir -- directory whose subdirectories each hold .jpg images

    Output line format: ``<class>_<file> <bbox> <bbox> ...`` where each bbox
    is ``[class_idx, x1, y1, x2, y2]``.
    """
    # init model (local imports keep this script-style helper self-contained)
    sys.path.append(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__))))
    from data import BaseTransform, VOC_CLASSES as labelmap
    from ssd import SSD300
    num_classes = len(labelmap) + 1  # +1 for background
    net = SSD300(num_classes, 'test').eval()
    net.load_state_dict(torch.load(args.weights))
    transform = BaseTransform(300, (104 / 256.0, 117 / 256.0, 123 / 256.0))

    def predict(frame):
        """Return [class_idx, x1, y1, x2, y2] for detections scoring >= 0.6."""
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        pts = []
        for i in range(detections.size(1)):  # 1--head, 2--body, 3--ass
            j = 0
            # ROBUSTNESS FIX: bound j by the fixed top_k size so a frame where
            # every slot scores >= 0.6 cannot index past the tensor.
            while j < detections.size(2) and detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] *
                      scale).cpu().numpy().astype(int)
                pts.append([i] + pt.tolist())
                j += 1
        return pts

    def predict2(frame):
        """Debug variant of predict(): draw boxes and labels onto the frame."""
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            # same bound as predict() above
            while j < detections.size(2) and detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame, (int(pt[0]), int(pt[1])),
                              (int(pt[2]), int(pt[3])), COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                            FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # ROBUSTNESS FIX: the original opened anno.txt without a context manager,
    # leaking the handle (and possibly buffered lines) on any exception.
    with open('anno.txt', 'w') as fid:

        def write2file(imgname, bboxes):
            # key is "<class_dir>_<filename>"
            imgname = imgname.split('/')[-2] + '_' + imgname.split('/')[-1]
            fid.write(imgname)
            for bbox in bboxes:
                fid.write(' ' + str(bbox) + ' ')
            fid.write('\n')

        classes = os.listdir(args.image_dir)

        import tqdm
        t = tqdm.tqdm()
        t.total = len(classes)

        for class_id in classes:
            t.update()
            class_dir = os.path.join(args.image_dir, class_id)
            # one sorted() suffices; the original sorted twice redundantly
            imgfiles = sorted(
                os.path.join(class_dir, x)
                for x in os.listdir(class_dir)
                if x.endswith('.jpg'))
            for imgfile in imgfiles:
                img = cv2.imread(imgfile)
                bboxes = predict(img)
                write2file(imgfile, bboxes)
# ===== Example #28 =====
                coords = (pt[0], pt[1], pt[2], pt[3])
                pred_num += 1
                with open(filename, mode='a') as f:
                    f.write(
                        str(pred_num) + ' label: ' + label_name + ' score: ' +
                        str(score) + ' ' + ' || '.join(str(c)
                                                       for c in coords) + '\n')
                j += 1


if __name__ == '__main__':
    # Build the SSD detector in inference mode (+1 class for background).
    num_classes = len(VOC_CLASSES) + 1
    net = build_ssd('test', 300, num_classes)
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')

    # VOC2007 test split; no image transform here — evaluation applies its
    # own BaseTransform below.
    testset = VOCDetection(args.voc_root, [('2007', 'test')], None,
                           AnnotationTransform())

    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True

    # Per-frame pixel-mean subtraction happens inside BaseTransform.
    eval_transform = BaseTransform(net.size, (104, 117, 123))
    test_net(args.save_folder, net, args.cuda, testset, eval_transform,
             thresh=args.visual_threshold)
# ===== Example #29 =====
    write_voc_results_file(box_list, dataset)
    do_python_eval(output_dir)


if __name__ == '__main__':
    # load net
    # num_classes = len(labelmap_lisa) + 1                      # +1 for background

    # LISA traffic-light model: 2 classes (target + background).
    net = build_ssd('test', 300, 2)
    # second positional arg of torch.load is map_location: restore on CPU
    # first, then move to GPU below if requested.
    net.load_state_dict(torch.load(args.trained_model, 'cpu'))
    net.eval()
    print('Finished loading model!')

    # load data
    dataset = LISA(args.lisa_root, train=False,
                   transform=BaseTransform(300, dataset_mean))

    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True

    # evaluation
    test_net(args.save_folder, net, args.cuda, dataset,
             BaseTransform(net.size, dataset_mean), args.top_k, 300,
             thresh=args.confidence_threshold)
# ===== Example #30 =====
    if args.dataset == 'VOC':
        cfg = voc
    else:
        cfg = custom
        # +1 for background
    net = build_ssd(phase='test',
                    size=cfg['min_dim'],
                    num_classes=cfg['num_classes'])
    net.load_state_dict(torch.load(args.trained_model))
    net.eval()
    print('Finished loading model!')

    # Load data (TODO:  add COCO)
    if args.dataset == 'VOC':
        dataset = VOCDetection(args.dataset_root, [(set_type)],
                               BaseTransform(cfg['min_dim'], MEANS),
                               VOCAnnotationTransform())
    else:
        dataset = CustomDetection(
            root=args.dataset_root,
            image_set=[(set_type)],
            transform=BaseTransform(cfg['min_dim'], MEANS),
            target_transform=CustomAnnotationTransform(train=False))

    net = net.to(device)

    # Evaluation
    test_net(args.save_folder,
             net,
             args.cuda,
             dataset,