Example #1
    def __init__(self, config):
        self.config = config
        self.exp_name = self.config.get('exp_name', None)
        if self.exp_name is None:
            self.exp_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        self.log_dir = osp.join(self.config['exp_dir'], self.exp_name, 'logs')
        self.pth_dir = osp.join(self.config['exp_dir'], self.exp_name,
                                'checkpoints')
        os.makedirs(self.log_dir, exist_ok=True)
        os.makedirs(self.pth_dir, exist_ok=True)

        self.writer = SummaryWriter(log_dir=self.log_dir)

        self.model = self._init_net()
        self.optimizer = self._init_optimizer()
        self.criterion = nn.CrossEntropyLoss().to(self.config['device'])

        self.train_loader, self.val_loader = self._init_dataloaders()

        pretrained_path = self.config.get('model_path', False)
        if pretrained_path:
            self.training_epoch, self.total_iter = load_checkpoint(
                pretrained_path, self.model, optimizer=self.optimizer)

        else:
            self.training_epoch = 0
            self.total_iter = 0

        self.epochs = self.config.get('epochs', int(1e5))
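
A minimal usage sketch for this training constructor. The owning class name
(Trainer) and every config value below are assumptions for illustration, not
part of the original code:

# Hypothetical usage; 'Trainer' stands in for the class owning the
# __init__ above.
config = {
    'exp_dir': './experiments',   # root directory for logs and checkpoints
    'exp_name': 'baseline',       # optional; a timestamp is used if omitted
    'device': 'cuda:0',           # device for the CrossEntropyLoss criterion
    'epochs': 100,                # optional; defaults to int(1e5)
    # 'model_path': 'checkpoints/last.pth',  # optional resume checkpoint
}
trainer = Trainer(config)
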
Example #2
    def __init__(self, config):
        self.config = config
        self.exp_name = self.config.get('exp_name', None)
        if self.exp_name is None:
            self.exp_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        self.res_dir = osp.join(self.config['exp_dir'], self.exp_name,
                                'results')
        os.makedirs(self.res_dir, exist_ok=True)

        self.model = self._init_net()

        self.inference_loader = self._init_dataloader()

        pretrained_path = self.config.get('model_path', False)
        if pretrained_path:
            load_checkpoint(pretrained_path, self.model)
        else:
            raise Exception(
                "model_path doesn't exist in config. Please specify a checkpoint path."
            )
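
A matching sketch for this inference constructor; 'Inferencer' is a
hypothetical class name. Unlike the training constructor, model_path is
mandatory here:

# Hypothetical usage; 'Inferencer' stands in for the owning class.
config = {
    'exp_dir': './experiments',
    'exp_name': 'baseline',
    'model_path': 'checkpoints/best.pth',  # required, or __init__ raises
}
inferencer = Inferencer(config)
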
Example #3
    def __init__(self, config):
        self.config = config
        self.exp_name = self.config.get("exp_name", None)
        if self.exp_name is None:
            self.exp_name = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        self.res_dir = osp.join(self.config["exp_dir"], self.exp_name,
                                "results")
        os.makedirs(self.res_dir, exist_ok=True)

        self.model = self._init_net()

        self.pipeline_loader = self._init_dataloader()

        with open(config["mapping_json"]) as f:
            self.mapper = json.load(f)
        # invert the mapping: model output indices map back to labels
        self.mapper = {j: i for i, j in self.mapper.items()}

        pretrained_path = self.config.get("model_path", False)
        if pretrained_path:
            load_checkpoint(pretrained_path, self.model)
        else:
            raise Exception(
                "model_path doesn't exist in config. Please specify a checkpoint path."
            )
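
For clarity, a standalone sketch of the mapping inversion performed above,
assuming mapping_json stores a label-to-index dictionary:

# Assumed file contents: {"cat": 0, "dog": 1}
mapper = {"cat": 0, "dog": 1}
# The inversion flips it to index -> label for decoding model outputs.
mapper = {j: i for i, j in mapper.items()}
assert mapper == {0: "cat", 1: "dog"}
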
def test_model(model, args):
    # per-frame outputs below assume a batch size of 1 (e.g. the 3x4 calib reshape)
    assert args.batch_size == 1
    # get data list for tracking
    tracking_list_seq = []
    tracking_list = []
    batch_time = tu.AverageMeter()
    data_time = tu.AverageMeter()

    # resume from a checkpoint
    nu.load_checkpoint(model, args.resume, is_test=True)

    cudnn.benchmark = True

    dataset = Dataset(args.json_path, 'test', args.data_split,
                      args.set == 'kitti', args.percent, args.is_tracking,
                      args.is_normalizing, args.n_box_limit)

    print("Number of image to test: {}".format(dataset.__len__()))

    # Data loading code
    test_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True,
                             drop_last=True)

    model.eval()

    end = time.time()
    for i, (image, box_info) in enumerate(test_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        end = time.time()

        with torch.no_grad():
            box_output, targets = model(image, box_info, args.device, 'test')

        batch_time.update(time.time() - end)

        (rois_gt, dim_gt_, alpha_gt_, dep_gt_, cen_gt_, loc_gt_, ignore_,
         tid_gt) = targets

        cam_calib = box_info['cam_calib'].cpu().data.numpy().reshape(3, 4)
        cam_rot = box_info['cam_rot'].cpu().data.numpy()
        cam_loc = box_info['cam_loc'].cpu().data.numpy()
        box_gt = rois_gt.cpu().data.numpy()
        box_pd = box_output['rois'].cpu().data.numpy()
        dim_gt = dim_gt_.cpu().data.numpy()
        dim_pd = box_output['dim'].cpu().data.numpy()
        alpha_gt = alpha_gt_.cpu().data.numpy()
        alpha_pd = nu.get_alpha(box_output['rot'].cpu().data.numpy())
        depth_gt = dep_gt_.cpu().data.numpy()
        depth_pd = box_output['dep'].cpu().data.numpy()
        center_gt = cen_gt_.cpu().data.numpy()
        center_pd = box_output['cen'].cpu().data.numpy()
        loc_gt = loc_gt_.cpu().data.numpy()
        loc_pd = box_output['loc'].cpu().data.numpy()

        # Pool each 7x7 RoI feature map down to a 128-d vector and
        # L2-normalize it so embeddings can be compared by dot product.
        feature = F.normalize(
            F.avg_pool2d(box_output['feat'], (7, 7)).view(-1, 128))
        # feature = box_output['feat']
        feature_np = feature.cpu().data.numpy()

        tracking_list.append({
            'im_path': box_info['im_path'],
            'endvid': box_info['endvid'].cpu().data.numpy(),
            'rois_pd': box_pd,
            'rois_gt': box_gt,
            'feature': feature_np,
            'dim_pd': dim_pd,
            'alpha_pd': alpha_pd,
            'depth_pd': depth_pd,
            'center_pd': center_pd,
            'loc_pd': loc_pd,
            'dim_gt': dim_gt,
            'alpha_gt': alpha_gt,
            'depth_gt': depth_gt,
            'center_gt': center_gt,
            'loc_gt': loc_gt,
            'cam_calib': cam_calib,
            'cam_rot': cam_rot,
            'cam_loc': cam_loc,
            'ignore': ignore_.cpu().data.numpy(),
            'tid_gt': tid_gt.cpu().data.numpy(),
        })

        # flush the sequence buffer at a video boundary or on the last batch
        # (enumerate is zero-based, so the last index is len(test_loader) - 1)
        if box_info['endvid'].cpu().data.numpy().any() \
                or i == len(test_loader) - 1:
            tracking_list_seq.append(tracking_list)
            tracking_list = []

        # periodic progress log
        if i % 100 == 0 and i != 0:
            print("Processed {} / {} batches".format(i, len(test_loader)))
        end = time.time()

    if args.track_name is None:
        trk_name = os.path.join(
            cfg.OUTPUT_PATH, '{}_{}_{}_bdd_roipool_output.pkl'.format(
                args.session,
                str(args.start_epoch).zfill(3), args.set))
    else:
        trk_name = os.path.join(cfg.OUTPUT_PATH, args.track_name)

    with open(trk_name, 'wb') as f:
        print("Saving {} with total {} sequences...".format(
            trk_name, len(tracking_list_seq)))
        pickle.dump(tracking_list_seq, f)
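
A hedged sketch of the argument object test_model consumes. Every field and
value below is inferred from the attribute accesses above and is illustrative
only, not the project's real defaults:

from argparse import Namespace

args = Namespace(
    batch_size=1,                       # must be 1 (asserted at the top)
    workers=4,
    device='cuda:0',
    json_path='data/annotations.json',
    data_split='val',
    set='kitti',
    percent=100,
    is_tracking=True,
    is_normalizing=True,
    n_box_limit=40,
    resume='checkpoints/last.pth',      # checkpoint to evaluate
    track_name=None,                    # None -> auto-generated pickle name
    session=1,
    start_epoch=0,
)
test_model(model, args)
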
def run_training(model, args):

    os.makedirs(cfg.CHECKPOINT_PATH, exist_ok=True)

    cudnn.benchmark = True

    if args.has_val:
        phases = ['train', 'val']
    else:
        phases = ['train']

    # Data loading code
    train_loader = {
        phase:
        DataLoader(Dataset(args.json_path, phase, phase, args.set == 'kitti',
                           args.percent, args.is_tracking, args.is_normalizing,
                           args.n_box_limit),
                   batch_size=args.batch_size,
                   shuffle=(phase == 'train'),
                   num_workers=args.workers,
                   pin_memory=True,
                   drop_last=True)
        for phase in phases
    }

    # Optimizer
    if args.adaptBN:
        # AdaptBN: give the optimizer only the BatchNorm parameters with a
        # zero learning rate; together with the freezing below, only the BN
        # running statistics adapt during training.
        model_param = list()
        for m in model.modules():
            if isinstance(m, nn.BatchNorm2d):
                model_param.append({'params': list(m.parameters())})
        lr = 0.0
    else:
        # model_param = model.parameters()
        model_param = filter(lambda p: p.requires_grad, model.parameters())
        lr = args.lr

    if args.optim == 'sgd':
        optimizer = torch.optim.SGD(model_param,
                                    lr,
                                    momentum=args.momentum,
                                    nesterov=True,
                                    weight_decay=args.weight_decay)
    elif args.optim == 'adam':
        optimizer = torch.optim.Adam(model_param,
                                     lr,
                                     weight_decay=args.weight_decay,
                                     amsgrad=True)
    else:
        raise ValueError("Unsupported optimizer: {}".format(args.optim))

    # optionally resume from a checkpoint
    if args.resume:
        nu.load_checkpoint(model, args.resume, optimizer=optimizer)

    # For AdaptBN, keep the model in eval mode but put BatchNorm layers in
    # train mode so their running statistics update, and freeze their
    # affine parameters.
    if args.adaptBN:
        model.eval()
        for m in model.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.train()
                for p in m.parameters():
                    p.requires_grad = False

    if args.use_tfboard:
        from tensorboardX import SummaryWriter

        logger = SummaryWriter("logs")
    else:
        logger = None

    for epoch in range(args.start_epoch, args.epochs):
        # Adjust the learning rate each epoch; on resume this avoids
        # over-suppressing the rate when start_epoch is large.
        nu.adjust_learning_rate(args, optimizer, epoch)

        # train for one epoch
        for phase in phases:
            if phase == 'train':
                model.train()
                # nu.freeze_model(model.module.base)
                # nu.freeze_model(model.module.rot)
                # nu.freeze_model(model.module.dep)
                # nu.freeze_model(model.module.dim)
                train_model(args, train_loader[phase], model, optimizer, epoch,
                            phase, logger)
                acc = 0.0
            else:
                model.eval()
                acc = val_model(args, train_loader[phase], model, epoch, phase,
                                logger)

            # Save checkpoint
            nu.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_score': acc,
                    'phase': phase,
                    'save_path': cfg.CHECKPOINT_PATH,
                }, args.set == 'kitti', args.session, args.check_freq)

    if args.use_tfboard:
        logger.close()

    print("Training finished!!")