Exemplo n.º 1
0
 def test_got10k(self):
     """Smoke-test GOT10k loading for every subset, with and without meta."""
     root_dir = os.path.join(self.data_dir, 'GOT-10k')
     subsets = ['train', 'val', 'test']
     # plain sequences first
     for name in subsets:
         self._check_dataset(GOT10k(root_dir, subset=name))
     # then with per-sequence meta information attached
     for name in subsets:
         self._check_dataset(GOT10k(root_dir, subset=name, return_meta=True))
Exemplo n.º 2
0
 def __init__(self, root, subset="val") -> None:
     """Wrap a GOT-10k val/test split together with its image transforms."""
     assert subset in ['val', "test"], 'Unknown subset.'
     here = os.path.dirname(os.path.realpath(__file__))
     # relative roots are resolved two directory levels above this file
     if os.path.isabs(root):
         self.root = root
     else:
         self.root = os.path.join(here, '../../', root)
     # meta info is only loaded for the test split
     self.is_testing = subset != "val"
     self.dataset = GOT10k(self.root, subset=subset, return_meta=self.is_testing)
     (self.transforms_img, _), self.transform_norm = make_transforms(subset)
Exemplo n.º 3
0
def main(instanc_size=511,
         num_threads=12,
         input_dir="got10k",
         output_dir="./"):
    """Crop every GOT-10k train/val sequence into fixed-size patches in parallel."""
    crop_path = join(output_dir, 'crop{:d}'.format(instanc_size))
    if not isdir(crop_path):
        makedirs(crop_path)

    for subset in ['val', 'train']:
        dataset = GOT10k(root_dir=input_dir, subset=subset, return_meta=True)
        base_path = join(crop_path, subset)  # e.g. crop511/val
        total = len(dataset)

        # fan sequences out over a process pool; crop_img does the heavy work
        with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
            pending = []
            for idx, (img_files, anno, meta) in enumerate(dataset):
                pending.append(
                    executor.submit(crop_img,
                                    img_files,
                                    anno,
                                    meta,
                                    base_path,
                                    dataset.seq_names[idx],
                                    instanc_size=instanc_size))
            for done, _ in enumerate(futures.as_completed(pending)):
                printProgress(done,
                              total,
                              prefix=subset,
                              suffix='Done ',
                              barLength=40)

    print('done')
Exemplo n.º 4
0
    def __init__(self,
                 root_dir,
                 subset,
                 report_dir,
                 list_file=None,
                 benchmark='all'):
        """Set up GOT-10k and OTB2015 datasets for failure-rate reporting."""
        self.subset = subset
        self.names = ['GOT-10k', 'OTB2015']
        self.datasets = [
            GOT10k(os.path.join(root_dir, self.names[0]), subset=subset),
            OTB(os.path.join(root_dir, self.names[1]), 2015)
        ]
        self.report_dir = os.path.join(report_dir, 'FR')
        self.theta = .1

        # restrict to a single benchmark when one was requested by name
        for k in range(2):
            if benchmark == self.names[k]:
                self.datasets = [self.datasets[k]]
                self.names = [self.names[k]]
                break

        # per-benchmark accumulators: overall totals plus per-sequence stats
        self.dict = {n: {'total': {}, 'seq_wise': {}} for n in self.names}
Exemplo n.º 5
0
def get_data_from_one_series(num):
    """Load the first 10 frames of GOT-10k test sequence *num* and its first box."""
    dataset = GOT10k(root_dir='../GOT-10k', subset='test')
    img_file, anno = dataset[num]
    # decode only the first ten frames of the sequence
    imgs = [cv2.imread(img_file[k]) for k in range(10)]
    return imgs, anno[0, :]
Exemplo n.º 6
0
 def test_identity_tracker(self):
     """Run the tracker on a random GOT-10k val sequence and check shapes."""
     dataset = GOT10k(os.path.join(self.data_dir, 'GOT-10k'), subset='val')
     # pick an arbitrary sequence and track from its first annotation
     img_files, anno = random.choice(dataset)
     boxes, times = self.tracker.track(img_files, anno[0], visualize=True)
     # one predicted box and one timing entry per annotated frame
     self.assertEqual(boxes.shape, anno.shape)
     self.assertEqual(len(times), len(anno))
Exemplo n.º 7
0
def test_got10k():
    """Verify every frame file referenced by the GOT-10k train split exists.

    Prints "Error:<i>" for a sequence with a missing frame, or just the
    sequence index when the sequence cannot be loaded at all.
    """
    dataset = GOT10k(root_dir='../GOT10K', subset='train')
    for i in range(len(dataset)):
        try:
            # dataset[i] -> (img_files, anno); only the file list is needed
            img_files = dataset[i][0]
            for path in img_files:
                # idiomatic truth test instead of `== False`
                if not os.path.exists(path):
                    print("Error:{}".format(i))
                    break
        except Exception:
            # sequence failed to load entirely; report its index
            print(i)
Exemplo n.º 8
0
 def __init__(self, name, root, subset, frame_per_video, start_idx) -> None:
     """Dataset over GOT-10k sequences, drawing frame_per_video samples per video."""
     super().__init__()
     here = os.path.dirname(os.path.realpath(__file__))
     self.name = name
     # relative roots are resolved two directory levels above this file
     self.root = root if os.path.isabs(root) else os.path.join(here, '../../', root)
     assert subset in ['train', 'val'], 'Unknown subset.'
     self.subset = subset
     self.dataset = GOT10k(self.root, subset=subset, return_meta=True)
     self.length = frame_per_video * len(self.dataset)
     self.start_idx = start_idx
     # random visiting order over the sequences
     self.indices = np.random.permutation(len(self.dataset))
     (self.template_transforms, self.search_transforms), self.norm_transform = make_transforms(subset)
     # sequence indices to skip in the train split (problematic sequences)
     self.ignore = sorted([1204, 4224, 4418, 7787, 7964, 9171, 9176]) if subset == "train" else []
Exemplo n.º 9
0
def example_loop_dataset():
    """Visualize every sequence of the GOT-10k val split, frame by frame."""
    dataset = GOT10k(ROOT_DIR, subset='val')

    for idx, (img_files, anno) in enumerate(dataset):
        seq_name = dataset.seq_names[idx]
        print('Sequence:', seq_name)

        # each annotation row is (left, top, width, height)
        for frame_idx, img_file in enumerate(img_files):
            frame = Image.open(img_file)
            show_frame(frame, anno[frame_idx, :], colors='w')
Exemplo n.º 10
0
 def __init__(self,
              root_dir,
              subset='val',
              result_dir='results',
              report_dir='reports',
              use_dataset=True):
     """GOT-10k experiment driver; *subset* must be 'val' or 'test'."""
     super(ExperimentGOT10k, self).__init__()
     assert subset in ['val', 'test']
     self.subset = subset
     # the dataset can be skipped when only reporting precomputed results
     if use_dataset:
         self.dataset = GOT10k(root_dir, subset=subset)
     self.result_dir = os.path.join(result_dir, 'GOT-10k')
     self.report_dir = os.path.join(report_dir, 'GOT-10k')
     self.nbins_iou = 101   # number of IoU bins
     self.repetitions = 3   # runs per sequence
Exemplo n.º 11
0
    def _init_pairwise_dataset(dataset_type: DatasetType, dir_path: str,
                               **kwargs) -> SiamesePairwiseDataset:
        """Build a SiamesePairwiseDataset over the requested sequence dataset."""
        # dispatch table: dataset type -> sequence-dataset constructor
        builders = {
            DatasetType.GOT10k:
                lambda: GOT10k(root_dir=dir_path, **kwargs),
            DatasetType.OTB13:
                lambda: OTB(root_dir=dir_path, version=2013, **kwargs),
            DatasetType.OTB15:
                lambda: OTB(root_dir=dir_path, version=2015, **kwargs),
            DatasetType.VOT15:
                lambda: VOT(dir_path, version=2015, **kwargs),
            DatasetType.ILSVRC15:
                lambda: ImageNetVID(root_dir=dir_path, subset='train', **kwargs),
        }
        if dataset_type not in builders:
            raise ValueError(f"unsupported dataset type: {dataset_type}")
        data_seq = builders[dataset_type]()

        return SiamesePairwiseDataset(cast(Sequence, data_seq), TrackerConfig())
Exemplo n.º 12
0
 def __init__(self, args, data_subset="train"):
     """GOT-10k pairwise dataset: SiamFC crops plus ImageNet normalization."""
     seqs = GOT10k(args.data_path, subset=data_subset, return_meta=True)
     self.cfg = args.cfg
     pair_transform = SiamFCTransforms(exemplar_sz=self.cfg["exemplar_sz"],
                                       instance_sz=self.cfg["instance_sz"],
                                       context=self.cfg["context"])
     # shared tail: tensor conversion then ImageNet mean/std normalization
     steps = [
         pt_util.ToTensor(scale=255),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225]),
     ]
     if data_subset == "train":
         # training additionally flips horizontally with probability 0.5
         steps.insert(0, transforms.RandomApply([transforms.Lambda(fliplr)], 0.5))
     transform = transforms.Compose(steps)
     super(GOT10kDataset, self).__init__(args, seqs, data_subset,
                                         pair_transform, transform)
Exemplo n.º 13
0
    def __init__(self,
                 root_dir,
                 subset,
                 report_dir,
                 list_file=None,
                 benchmark='all'):
        """Prepare GOT-10k and OTB2015 datasets for comparison reporting."""
        self.subset = subset
        self.names = ['GOT-10k', 'OTB2015']
        self.datasets = [
            GOT10k(os.path.join(root_dir, self.names[0]), subset=subset),
            OTB(os.path.join(root_dir, self.names[1]), 2015)
        ]
        self.report_dir = os.path.expanduser(report_dir)
        self.theta = .4

        # keep only the selected benchmark when one was named explicitly
        for k in range(2):
            if benchmark == self.names[k]:
                self.datasets = [self.datasets[k]]
                self.names = [self.names[k]]
                break

        self.colors = ['r', 'y']   # plot color per benchmark
        self.cd = 50
Exemplo n.º 14
0
from pysot.utils.bbox import get_axis_aligned_bbox
from toolkit.datasets import DatasetFactory
from matplotlib.image import imread

from PIL import Image
from got10k.datasets import GOT10k
from got10k.utils.viz import show_frame

if __name__ == '__main__':
    cfg.merge_from_file('/home/sourabhswain/Documents/SiamRPN/config.yaml')
    net_path = '/home/sourabhswain/Documents/SiamRPN/model.pth'
    model = ModelBuilder()
    model = load_pretrain(model, net_path).cuda().eval()
    tracker = build_tracker(model)

    dataset = GOT10k(root_dir='/home/sourabhswain/Documents/SiamRPN/dataset',
                     subset='val')

    #dataset = DatasetFactory.create_dataset(name='GOT-10k',
    #                                        dataset_root='/home/sourabhswain/Documents/SiamRPN/dataset',
    #                                        load_img=False)
    """
    for v_idx, (video, anno) in enumerate(dataset):


        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []


        #Access all frames
Exemplo n.º 15
0
def main():
    """Train TrackerSiamRPN on combined ImageNet-VID + GOT-10k data.

    Parses CLI arguments, builds train and validation loaders over the
    merged datasets, optionally restores weights from a checkpoint, then
    runs the training loop, saving loss plots and a model snapshot at the
    end of every epoch. Exits if the classification loss becomes NaN.
    """
    '''parameter initialization'''
    args = parser.parse_args()
    exp_name_dir = AverageMeter.experiment_name_dir(args.experiment_name)
    '''model on gpu'''
    model = TrackerSiamRPN()
    '''setup train data loader'''
    name = 'All'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset = GOT10k(root_dir, subset='train')
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC2017_VID/ILSVRC'
        seq_dataset = ImageNetVID(root_dir, subset=('train'))
    elif name == 'All':
        # merge ImageNet-VID and GOT-10k into one training set
        root_dir_vid = '/home/arbi/desktop/ILSVRC2017_VID/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('train'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='train')
        seq_dataset = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset', len(seq_dataset))

    train_data = TrainDataLoader(seq_dataset, name)
    train_loader = DataLoader(dataset=train_data,
                              batch_size=1,
                              shuffle=True,
                              num_workers=16,
                              pin_memory=True)
    '''setup val data loader'''
    name = 'All'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset_val = GOT10k(root_dir, subset='val')
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC2017_VID/ILSVRC'
        seq_dataset_val = ImageNetVID(root_dir, subset=('val'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC2017_VID/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('val'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='val')
        seq_dataset_val = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset_val', len(seq_dataset_val))

    val_data = TrainDataLoader(seq_dataset_val, name)
    val_loader = DataLoader(dataset=val_data,
                            batch_size=1,
                            shuffle=True,
                            num_workers=16,
                            pin_memory=True)
    '''load weights'''
    init_weights(model)

    # BUGFIX: compare to None with `is not None`, not `not ... == None`
    if args.checkpoint_path is not None:
        assert os.path.isfile(
            args.checkpoint_path), '{} is not valid checkpoint_path'.format(
                args.checkpoint_path)
        try:
            model.net.load_state_dict(
                torch.load(args.checkpoint_path,
                           map_location=lambda storage, loc: storage))
            print('You are loading the model.load_state_dict')
        # BUGFIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; fall back to fresh weights on load failure only
        except Exception:
            init_weights(model)
    '''train phase'''
    closses, rlosses, tlosses, steps = (AverageMeter(), AverageMeter(),
                                        AverageMeter(), AverageMeter())

    for epoch in range(config.epoches):
        print('Train epoch {}/{}'.format(epoch + 1, config.epoches))
        with tqdm(total=config.train_epoch_size) as progbar:
            for i, dataset in enumerate(train_loader):

                closs, rloss, loss, cur_lr = model.step(epoch,
                                                        dataset,
                                                        backward=True)

                closs_ = closs.cpu().item()

                # abort immediately on a diverged (NaN) classification loss
                if np.isnan(closs_):
                    sys.exit(0)

                closses.update(closs.cpu().item())
                rlosses.update(rloss.cpu().item())
                tlosses.update(loss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(closses.avg),
                                    rloss='{:05.3f}'.format(rlosses.avg),
                                    tloss='{:05.3f}'.format(tlosses.avg))

                progbar.update()

                if i >= config.train_epoch_size - 1:
                    '''save plot'''
                    closses.closs_array.append(closses.avg)
                    rlosses.rloss_array.append(rlosses.avg)
                    tlosses.loss_array.append(tlosses.avg)
                    steps.update(steps.count)
                    steps.steps_array.append(steps.count)

                    steps.plot(exp_name_dir)
                    '''save model'''
                    model.save(model, exp_name_dir, epoch)

                    break
Exemplo n.º 16
0
 def __init__(self, T, Radius):
     """Hold the GOT-10k val dataset with frame-distance and radius settings."""
     self.dataset = GOT10k(root_dir='../GOT-10k', subset='val')
     self.frame_num = len(self.dataset)
     self.frame_distance = T
     # NOTE(review): attribute keeps the original 'raduis' spelling —
     # external code may read it under that name
     self.raduis = Radius
Exemplo n.º 17
0
        self._pick_img_pairs(index)
        self.open()

        self._tranform()

        regression_target, conf_target = self._target()
        self.count += 1

        return self.ret['train_z_transforms_rgb'], \
               self.ret['train_x_transforms_rgb'], \
               self.ret['train_z_transforms_ir'], \
               self.ret['train_x_transforms_ir'], \
               regression_target, \
               conf_target.astype(np.int64)

    def __len__(self):
        """Epoch length: 64 samples for every configured training step."""
        return 64 * config.train_epoch_size


if __name__ == "__main__":
    # smoke test: fetch one RGB-T training sample from the RGB-T234 dataset
    root_dir = '/home/krautsct/RGB-T234'
    seq_dataset_rgb = GOT10k(root_dir, subset='train_i')
    # infrared counterpart of the same split
    seq_dataset_i = GOT10k(root_dir, subset='train_i', visible=False)
    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([ToTensor()])
    train_data = TrainDataLoaderRGBT(seq_dataset_rgb, seq_dataset_i,
                                     train_z_transforms, train_x_transforms)
    print(train_data.__getitem__(180))
Exemplo n.º 18
0
def train(data_dir, resume_path=None, vis_port=None, init=None):
    """Train a SiamRPN tracker (ResNet22 backbone) on GOT-10k.

    Args:
        data_dir: root directory of the GOT-10k dataset.
        resume_path: optional checkpoint path to initialize or resume from.
        vis_port: optional visdom port for live visualization.
        init: truthy -> load only model weights from resume_path (fresh
            optimizer); falsy -> restore optimizer state and epoch too.
    """
    #-----------------------
    name = 'GOT-10k'
    seq_dataset_train = GOT10k(data_dir, subset='train')
    seq_dataset_val = GOT10k(data_dir, subset='val')
    print('seq_dataset_train', len(seq_dataset_train))  # train split: 9335 sequences

    # define transforms
    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([ToTensor()])
    valid_z_transforms = transforms.Compose([ToTensor()])
    valid_x_transforms = transforms.Compose([ToTensor()])

    # create dataset
    # -----------------------------------------------------------------------------------------------------
    # train_dataset = ImagnetVIDDataset(db, train_videos, data_dir, train_z_transforms, train_x_transforms)
    train_dataset = GOT10kDataset(seq_dataset_train, train_z_transforms,
                                  train_x_transforms, name)

    valid_dataset = GOT10kDataset(seq_dataset_val, valid_z_transforms,
                                  valid_x_transforms, name)

    # anchor boxes shared between the dataset and the loss functions
    anchors = train_dataset.anchors

    # create dataloader

    trainloader = DataLoader(dataset=train_dataset,
                             batch_size=config.train_batch_size,
                             shuffle=True,
                             num_workers=config.train_num_workers,
                             pin_memory=True,
                             drop_last=True)

    validloader = DataLoader(dataset=valid_dataset,
                             batch_size=config.valid_batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=config.valid_num_workers,
                             drop_last=True)

    # create summary writer
    if not os.path.exists(config.log_dir):
        os.mkdir(config.log_dir)
    summary_writer = SummaryWriter(config.log_dir)
    if vis_port:
        vis = visual(port=vis_port)

    # start training
    # -----------------------------------------------------------------------------------------------------#
    model = SiamRPNNet(backbone=ResNet22(), head=SiamRPN())  # ResNet22 backbone + RPN head

    model = model.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=config.lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

    # load model weight
    # -----------------------------------------------------------------------------------------------------#
    start_epoch = 1
    if resume_path and init:  # load weights only; do not restore the optimizer
        print("init training with checkpoint %s" % resume_path + '\n')
        print(
            '------------------------------------------------------------------------------------------------ \n'
        )
        checkpoint = torch.load(resume_path)
        if 'model' in checkpoint.keys():
            model.load_state_dict(checkpoint['model'])
        else:
            model_dict = model.state_dict()  # current network parameters
            model_dict.update(checkpoint)  # overlay checkpoint parameters
            model.load_state_dict(model_dict)  # load the merged parameters
        del checkpoint
        torch.cuda.empty_cache()  # free cached GPU memory
        print("inited checkpoint")
    elif resume_path and not init:  # resume full training state from a checkpoint
        print("loading checkpoint %s" % resume_path + '\n')
        print(
            '------------------------------------------------------------------------------------------------ \n'
        )
        checkpoint = torch.load(resume_path)
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        del checkpoint
        torch.cuda.empty_cache()  # free cached GPU memory
        print("loaded checkpoint")
    elif not resume_path and config.pretrained_model:  # fall back to the configured pretrained model
        print("loading pretrained model %s" % config.pretrained_model + '\n')
        print(
            '------------------------------------------------------------------------------------------------ \n'
        )
        checkpoint = torch.load(config.pretrained_model)
        # change name and load parameters
        # checkpoint = {k.replace('features.features', 'featureExtract'): v for k, v in checkpoint.items()}
        model_dict = model.state_dict()
        model_dict.update(checkpoint)
        model.load_state_dict(model_dict)

    # print(model.featureExtract[:10])

    if torch.cuda.device_count() > 1:  # enable multi-GPU mode when more than one GPU is present
        model = nn.DataParallel(model)

    for epoch in range(start_epoch, config.epoch + 1):
        train_loss = []
        model.train()  # switch to training mode

        # earlier layers are intentionally not frozen here
        # if config.fix_former_3_layers:  # True: keep the first 10 layers fixed
        #     if torch.cuda.device_count() > 1:  # multi-GPU
        #         freeze_layers(model.module)
        #     else:  # single GPU
        #         freeze_layers(model)

        loss_temp_cls = 0
        loss_temp_reg = 0
        for i, data in enumerate(tqdm(trainloader)):
            exemplar_imgs, instance_imgs, regression_target, conf_target = data
            # conf_target (8,1125) (8,225x5)
            regression_target, conf_target = regression_target.cuda(
            ), conf_target.cuda()
            #pre_score=64,10,19,19 ; pre_regression=[64,20,19,19]
            pred_score, pred_regression = model(exemplar_imgs.cuda(),
                                                instance_imgs.cuda())
            # [64, 5x19x19, 2]=[64,1805,2]
            pred_conf = pred_score.reshape(
                -1, 2, config.anchor_num * config.score_size *
                config.score_size).permute(0, 2, 1)
            #[64,5x19x19,4] =[64,1805,4]
            pred_offset = pred_regression.reshape(
                -1, 4, config.anchor_num * config.score_size *
                config.score_size).permute(0, 2, 1)
            cls_loss = rpn_cross_entropy_balance(pred_conf,
                                                 conf_target,
                                                 config.num_pos,
                                                 config.num_neg,
                                                 anchors,
                                                 ohem_pos=config.ohem_pos,
                                                 ohem_neg=config.ohem_neg)
            reg_loss = rpn_smoothL1(pred_offset,
                                    regression_target,
                                    conf_target,
                                    config.num_pos,
                                    ohem=config.ohem_reg)
            loss = cls_loss + config.lamb * reg_loss  # classification + weighted regression loss
            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()
            torch.nn.utils.clip_grad_norm_(
                model.parameters(),
                config.clip)  # config.clip=10; clip gradients to avoid explosion
            optimizer.step()

            step = (epoch - 1) * len(trainloader) + i
            summary_writer.add_scalar('train/cls_loss', cls_loss.data, step)
            summary_writer.add_scalar('train/reg_loss', reg_loss.data, step)
            train_loss.append(loss.detach().cpu()
                              )  # detached from the graph (requires_grad=False) but shares storage
            loss_temp_cls += cls_loss.detach().cpu().numpy()
            loss_temp_reg += reg_loss.detach().cpu().numpy()
            # if vis_port:
            #     vis.plot_error({'rpn_cls_loss': cls_loss.detach().cpu().numpy().ravel()[0],
            #                     'rpn_regress_loss': reg_loss.detach().cpu().numpy().ravel()[0]}, win=0)
            if (i + 1) % config.show_interval == 0:
                #if (i + 1) % 5 == 0:
                tqdm.write(
                    "[epoch %2d][iter %4d] cls_loss: %.4f, reg_loss: %.4f lr: %.2e"
                    % (epoch, i, loss_temp_cls / config.show_interval,
                       loss_temp_reg / config.show_interval,
                       optimizer.param_groups[0]['lr']))
                loss_temp_cls = 0
                loss_temp_reg = 0
                # if vis_port:
                #     anchors_show = train_dataset.anchors#[1805,4]
                #     exem_img = exemplar_imgs[0].cpu().numpy().transpose(1, 2, 0)#[127,127,3]
                #     inst_img = instance_imgs[0].cpu().numpy().transpose(1, 2, 0)#ans[271,271,3] #h,w,c

                #     # show detected box with max score
                #     topk = config.show_topK# topK=3
                #     vis.plot_img(exem_img.transpose(2, 0, 1), win=1, name='exemple')
                #     cls_pred = conf_target[0]#cls_pred=[1805]   conf_target holds the ground-truth labels
                #     gt_box = get_topk_box(cls_pred, regression_target[0], anchors_show)[0]# show only the first gt box

                #     # show gt_box
                #     img_box = add_box_img(inst_img, gt_box, color=(255, 0, 0))
                #     vis.plot_img(img_box.transpose(2, 0, 1), win=2, name='instance')#c,h,w

                #     # show anchor with max cls—score
                #     cls_pred = F.softmax(pred_conf, dim=2)[0, :, 1]
                #     scores, index = torch.topk(cls_pred, k=topk)
                #     img_box = add_box_img(inst_img, anchors_show[index.cpu()])
                #     img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
                #     vis.plot_img(img_box.transpose(2, 0, 1), win=3, name='anchor_max_score')
                #     # show pred box and gt-box

                #     cls_pred = F.softmax(pred_conf, dim=2)[0, :, 1]
                #     topk_box = get_topk_box(cls_pred, pred_offset[0], anchors_show, topk=topk)#
                #     img_box = add_box_img(inst_img, topk_box)
                #     img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
                #     vis.plot_img(img_box.transpose(2, 0, 1), win=4, name='box_max_score')

                #     # show anchor and gt-box with max iou
                #     iou = compute_iou(anchors_show, gt_box).flatten()# IoU between every anchor and the gt box
                #     index = np.argsort(iou)[-topk:]# argsort ascending; take the top-k IoU anchor indices
                #     img_box = add_box_img(inst_img, anchors_show[index])
                #     img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
                #     vis.plot_img(img_box.transpose(2, 0, 1), win=5, name='anchor_max_iou')

                #     # detected box
                #     regress_offset = pred_offset[0].cpu().detach().numpy()
                #     topk_offset = regress_offset[index, :]
                #     anchors_det = anchors_show[index, :]
                #     pred_box = box_transform_inv(anchors_det, topk_offset)
                #     img_box = add_box_img(inst_img, pred_box)
                #     img_box = add_box_img(img_box, gt_box, color=(255, 0, 0))
                #     vis.plot_img(img_box.transpose(2, 0, 1), win=6, name='box_max_iou')

        train_loss = np.mean(train_loss)

        valid_loss = []
        # model.eval()
        # for i, data in enumerate(tqdm(validloader)):
        #     exemplar_imgs, instance_imgs, regression_target, conf_target = data

        #     regression_target, conf_target = regression_target.cuda(), conf_target.cuda()

        #     pred_score, pred_regression = model(exemplar_imgs.cuda(), instance_imgs.cuda())

        #     pred_conf = pred_score.reshape(-1, 2, config.anchor_num * config.score_size * config.score_size).permute(0,
        #                                                                                                              2,
        #                                                                                                              1)
        #     pred_offset = pred_regression.reshape(-1, 4,
        #                                           config.anchor_num * config.score_size * config.score_size).permute(0,
        #                                                                                                              2,
        #                                                                                                              1)
        #     cls_loss = rpn_cross_entropy_balance(pred_conf, conf_target, config.num_pos, config.num_neg, anchors,
        #                                          ohem_pos=config.ohem_pos, ohem_neg=config.ohem_neg)
        #     reg_loss = rpn_smoothL1(pred_offset, regression_target, conf_target, config.num_pos, ohem=config.ohem_reg)
        #     loss = cls_loss + config.lamb * reg_loss
        #     valid_loss.append(loss.detach().cpu())
        # valid_loss = np.mean(valid_loss)

        # validation loop above is disabled; report zero validation loss
        valid_loss = 0

        print("EPOCH %d valid_loss: %.4f, train_loss: %.4f" %
              (epoch, valid_loss, train_loss))

        summary_writer.add_scalar('valid/loss', valid_loss,
                                  (epoch + 1) * len(trainloader))

        adjust_learning_rate(
            optimizer, config.gamma
        )  # adjust before save, and it will be epoch+1's lr when next load

        if epoch % config.save_interval == 0:
            if not os.path.exists('./models/'):
                os.makedirs("./models/")
            save_name = "./models/siamrpnres22_{}.pth".format(epoch)
            #new_state_dict = model.state_dict()
            if torch.cuda.device_count() > 1:  # multi-GPU training
                new_state_dict = model.module.state_dict()
            else:  # single-GPU training
                new_state_dict = model.state_dict()
            torch.save(
                {
                    'epoch': epoch,
                    'model': new_state_dict,
                    'optimizer': optimizer.state_dict(),
                }, save_name)
            print('save model: {}'.format(save_name))
Exemplo n.º 19
0
def main():
    """Train a SiamRPN tracker and validate it after every epoch.

    Builds the training and validation sequence datasets and loaders,
    optionally restores weights from a checkpoint (or a pretrained
    backbone), then runs ``config.epoches`` epochs of train + val,
    saving the model and updating the train/val loss plot each epoch.
    """
    # --- parameter initialization ---
    args = parser.parse_args()
    exp_name_dir = util.experiment_name_dir(args.experiment_name)

    # --- model on gpu ---
    model = TrackerSiamRPN()
    model.net.init_weights()

    # --- setup train data loader ---
    name = 'VID'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset = GOT10k(root_dir, subset='val')
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC2017_VID'
        seq_dataset = ImageNetVID(root_dir, subset=('train', 'val'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC2017_VID'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('train'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='train')
        seq_dataset = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset', len(seq_dataset))

    train_data = TrainDataLoader(seq_dataset, name)
    train_loader = DataLoader(dataset=train_data,
                              batch_size=64,
                              shuffle=True,
                              num_workers=16,
                              pin_memory=True)

    # --- setup val data loader ---
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset_val = GOT10k(root_dir, subset='val')
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC2017_VID'
        seq_dataset_val = ImageNetVID(root_dir, subset=('val'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC2017_VID/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('val'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='val')
        seq_dataset_val = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset_val', len(seq_dataset_val))

    val_data = TrainDataLoader(seq_dataset_val, name)
    val_loader = DataLoader(dataset=val_data,
                            batch_size=8,
                            shuffle=False,
                            num_workers=16,
                            pin_memory=True)

    # --- load weights ---
    if args.checkpoint_path is not None:
        assert os.path.isfile(
            args.checkpoint_path), '{} is not valid checkpoint_path'.format(
                args.checkpoint_path)
        # Load once and reuse; the original re-read the file for every case.
        checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
        if 'model' in checkpoint:
            model.net.load_state_dict(checkpoint['model'])
        else:
            model.net.load_state_dict(checkpoint)
        print('You are loading the model.load_state_dict')

    elif config.pretrained_model:
        checkpoint = torch.load(config.pretrained_model)
        # Rename backbone keys so the pretrained weights match this network.
        checkpoint = {
            k.replace('features.features', 'featureExtract'): v
            for k, v in checkpoint.items()
        }
        model_dict = model.net.state_dict()
        model_dict.update(checkpoint)
        model.net.load_state_dict(model_dict)

    torch.cuda.empty_cache()

    # --- train phase ---
    train_closses, train_rlosses, train_tlosses = AverageMeter(), AverageMeter(
    ), AverageMeter()
    val_closses, val_rlosses, val_tlosses = AverageMeter(), AverageMeter(
    ), AverageMeter()

    train_val_plot = SavePlot(exp_name_dir, 'train_val_plot')

    for epoch in range(config.epoches):
        model.net.train()
        if config.fix_former_3_layers:
            # NOTE(review): the original guarded this with `if 1 > 1`, which
            # is always False, so the `model.net.module` branch was dead code.
            util.freeze_layers(model.net)
        print('Train epoch {}/{}'.format(epoch + 1, config.epoches))
        train_loss = []
        with tqdm(total=config.train_epoch_size) as progbar:
            for i, dataset in enumerate(train_loader):

                closs, rloss, loss = model.step(epoch, dataset, train=True)

                closs_ = closs.cpu().item()

                # Abort the run if the classification loss has diverged.
                if np.isnan(closs_):
                    sys.exit(0)

                train_closses.update(closs.cpu().item())
                train_rlosses.update(rloss.cpu().item())
                train_tlosses.update(loss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(train_closses.avg),
                                    rloss='{:05.3f}'.format(train_rlosses.avg),
                                    tloss='{:05.3f}'.format(train_tlosses.avg))

                progbar.update()
                train_loss.append(train_tlosses.avg)

                if i >= config.train_epoch_size - 1:
                    # Save the model at the end of each training epoch.
                    model.save(model, exp_name_dir, epoch)

                    break

        train_loss = np.mean(train_loss)

        # --- val phase ---
        val_loss = []
        with tqdm(total=config.val_epoch_size) as progbar:
            print('Val epoch {}/{}'.format(epoch + 1, config.epoches))
            for i, dataset in enumerate(val_loader):

                val_closs, val_rloss, val_tloss = model.step(epoch,
                                                             dataset,
                                                             train=False)

                closs_ = val_closs.cpu().item()

                if np.isnan(closs_):
                    sys.exit(0)

                val_closses.update(val_closs.cpu().item())
                val_rlosses.update(val_rloss.cpu().item())
                val_tlosses.update(val_tloss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(val_closses.avg),
                                    rloss='{:05.3f}'.format(val_rlosses.avg),
                                    tloss='{:05.3f}'.format(val_tlosses.avg))

                progbar.update()

                val_loss.append(val_tlosses.avg)

                if i >= config.val_epoch_size - 1:
                    break

        val_loss = np.mean(val_loss)
        train_val_plot.update(train_loss, val_loss)
        print('Train loss: {}, val loss: {}'.format(train_loss, val_loss))
Exemplo n.º 20
0
    p.add_argument('--seq_n', help='specific sequence to train on', type=int)
    p.add_argument('--epoch_n',
                   help='amount of epochs to train over',
                   type=int)
    p.add_argument('--gram', help='add gram regularization', type=int)
    p.add_argument('--subset', help='subset to train on', type=str)
    p.add_argument('--ablation',
                   help='train siamfc as ablation case',
                   type=int)
    args = p.parse_args(sys.argv[1:])

    start = time.time()

    # setup dataset and tracker
    root_dir = os.path.expanduser(args.root)
    seq_dataset = GOT10k(root_dir, subset=args.subset, return_meta=False)

    if args.model == 'siamfc':
        netpath = os.path.expanduser(args.weights) if args.weights else None
        tracker = TrackerSiamFC(backbone=SiamFC(), netpath=netpath)
        if not args.seq_n and not args.ablation:
            seq_dataset = Pairwise(seq_dataset)
        elif not args.seq_n and args.ablation:
            seq_dataset = Sequential(seq_dataset, n=args.seq_len, max_drift=0)
        else:
            seq_dataset = OnePairwise(seq_dataset, seq_n=args.seq_n)
    elif args.model == 'dssiam':
        netpath = os.path.expanduser(args.weights) if args.weights else None
        tracker = TrackerSiamFC(backbone=DSSiam(n=args.seq_len),
                                netpath=netpath)
        if not args.seq_n:
Exemplo n.º 21
0
from os.path import join
import json
from typing import Dict
import os
from got10k.datasets import GOT10k

# Build one JSON annotation file per GOT-10k subset, mapping
# "<subset>/<sequence>" -> {frame index -> {'000000': [x1, y1, x2, y2]}}.
# Frames flagged absent in the sequence meta are skipped.
input_dir = 'got10k'
output_dir = "./"

for subset in ['val', 'train']:
    js = dict()
    dataset = GOT10k(root_dir=input_dir, subset=subset, return_meta=True)
    n_videos = len(dataset)

    for s, (img_files, anno, meta) in enumerate(dataset):
        seq_name = dataset.seq_names[s]
        video_crop_base_path = join(subset, seq_name)

        js[video_crop_base_path] = dict()

        for idx, img_file in enumerate(img_files):
            # absence == 1 marks frames where the target is out of view.
            if meta['absence'][idx] == 1:
                continue
            rect = anno[idx, :]
            # Convert [x, y, w, h] to corner format [x1, y1, x2, y2].
            bbox = [rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]]
            js[video_crop_base_path]['{:02d}'.format(idx)] = {'000000': bbox}

    print('save json (dataset), please wait 20 seconds~')
    output_path = os.path.join(output_dir, '{}.json'.format(subset))
    # Use a context manager so the handle is closed and flushed; the
    # original passed an unclosed `open(...)` straight to json.dump.
    with open(output_path, 'w') as f:
        json.dump(js, f, indent=4, sort_keys=True)
    print('done!')
Exemplo n.º 22
0
def main():
    """Train a SiamRPN tracker on paired RGB + infrared (IR) streams.

    Builds RGB and IR train/val datasets and loaders, optionally restores
    weights from a checkpoint (or a pretrained backbone), then runs
    ``config.epoches`` epochs of joint train + val, saving the model each
    epoch and recording the validation losses to disk.
    """
    # --- parameter initialization ---
    args = parser.parse_args()
    exp_name_dir = util.experiment_name_dir(args.experiment_name)

    # --- model on gpu ---
    model = TrackerSiamRPN()

    # --- setup train data loader ---
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All', 'RGBT-234']
    if name == 'GOT-10k':
        root_dir_RGBT234 = args.train_path
        root_dir_GTOT = '/home/krautsct/Grayscale-Thermal-Dataset'
        seq_dataset_rgb = GOT10k(root_dir_RGBT234, subset='train_i')
        seq_dataset_i = GOT10k(root_dir_RGBT234,
                               subset='train_i',
                               visible=False)
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC'
        seq_dataset = ImageNetVID(root_dir, subset=('train'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('train'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='train')
        seq_dataset = util.data_split(seq_datasetVID, seq_datasetGOT)
    elif name == 'RGBT-234':
        root_dir = args.train_path
        seq_dataset = RGBTSequence(root_dir, subset='train')
        seq_dataset_val = RGBTSequence(root_dir, subset='val')
    print('seq_dataset', len(seq_dataset_rgb))

    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([ToTensor()])

    train_data_ir = TrainDataLoader_ir(seq_dataset_i, train_z_transforms,
                                       train_x_transforms, name)
    anchors = train_data_ir.anchors
    train_loader_ir = DataLoader(dataset=train_data_ir,
                                 batch_size=config.train_batch_size,
                                 shuffle=True,
                                 num_workers=config.train_num_workers,
                                 pin_memory=True)
    train_data_rgb = TrainDataLoader(seq_dataset_rgb, train_z_transforms,
                                     train_x_transforms, name)
    # NOTE(review): both loaders expose `anchors`; the RGB set overwrites
    # the IR one here — presumably they are identical, confirm.
    anchors = train_data_rgb.anchors
    train_loader_rgb = DataLoader(dataset=train_data_rgb,
                                  batch_size=config.train_batch_size,
                                  shuffle=True,
                                  num_workers=config.train_num_workers,
                                  pin_memory=True)

    # --- setup val data loader ---
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All', 'RGBT-234']
    if name == 'GOT-10k':
        val_dir = '/home/krautsct/RGB-t-Val'
        seq_dataset_val_rgb = GOT10k(val_dir, subset='train_i')
        seq_dataset_val_ir = GOT10k(val_dir, subset='train_i', visible=False)
    elif name == 'VID':
        root_dir = '/home/arbi/desktop/ILSVRC'
        seq_dataset_val = ImageNetVID(root_dir, subset=('val'))
    elif name == 'All':
        root_dir_vid = '/home/arbi/desktop/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('val'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='val')
        seq_dataset_val = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset_val', len(seq_dataset_val_rgb))

    valid_z_transforms = transforms.Compose([ToTensor()])
    valid_x_transforms = transforms.Compose([ToTensor()])

    val_data = TrainDataLoader_ir(seq_dataset_val_ir, valid_z_transforms,
                                  valid_x_transforms, name)
    val_loader_ir = DataLoader(dataset=val_data,
                               batch_size=config.valid_batch_size,
                               shuffle=False,
                               num_workers=config.valid_num_workers,
                               pin_memory=True)
    val_data_rgb = TrainDataLoader(seq_dataset_val_rgb, valid_z_transforms,
                                   valid_x_transforms, name)
    val_loader_rgb = DataLoader(dataset=val_data_rgb,
                                batch_size=config.valid_batch_size,
                                shuffle=False,
                                num_workers=config.valid_num_workers,
                                pin_memory=True)

    val_losslist = []

    # --- load weights ---
    if args.checkpoint_path is not None:
        assert os.path.isfile(
            args.checkpoint_path), '{} is not valid checkpoint_path'.format(
                args.checkpoint_path)
        # Load once and reuse; the original re-read the file for every case.
        checkpoint = torch.load(args.checkpoint_path, map_location='cpu')
        if 'model' in checkpoint:
            model.net.load_state_dict(checkpoint['model'])
        else:
            model.net.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        print('You are loading the model.load_state_dict')

    elif config.pretrained_model:
        checkpoint = torch.load(config.pretrained_model)
        # Rename backbone keys so the pretrained weights match this network.
        checkpoint = {
            k.replace('features.features', 'featureExtract'): v
            for k, v in checkpoint.items()
        }
        model_dict = model.net.state_dict()
        model_dict.update(checkpoint)
        model.net.load_state_dict(model_dict)

    # --- train phase ---
    train_closses, train_rlosses, train_tlosses = AverageMeter(), AverageMeter(
    ), AverageMeter()
    val_closses, val_rlosses, val_tlosses = AverageMeter(), AverageMeter(
    ), AverageMeter()

    val_plot = SavePlotVal(exp_name_dir, 'val_plot')
    for epoch in range(config.epoches):
        model.net.train()
        if config.fix_former_3_layers:
            util.freeze_layers(model.net)
        print('Train epoch {}/{}'.format(epoch + 1, config.epoches))
        train_loss = []
        with tqdm(total=config.train_epoch_size) as progbar:
            for i, (dataset_rgb, dataset_ir) in enumerate(
                    zip(train_loader_rgb, train_loader_ir)):

                # NOTE(review): `epoch` is passed twice — confirm this
                # matches model.step's signature.
                closs, rloss, loss = model.step(epoch,
                                                dataset_rgb,
                                                dataset_ir,
                                                anchors,
                                                epoch,
                                                i,
                                                train=True)

                closs_ = closs.cpu().item()

                # Abort the run if the classification loss has diverged.
                if np.isnan(closs_):
                    sys.exit(0)

                train_closses.update(closs.cpu().item())
                train_rlosses.update(rloss.cpu().item())
                train_tlosses.update(loss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(train_closses.avg),
                                    rloss='{:05.5f}'.format(train_rlosses.avg),
                                    tloss='{:05.3f}'.format(train_tlosses.avg))

                progbar.update()
                train_loss.append(train_tlosses.avg)

                if i >= config.train_epoch_size - 1:
                    # Save the model at the end of each training epoch.
                    model.save(model, exp_name_dir, epoch)

                    break

        train_loss = np.mean(train_loss)

        # --- val phase ---
        val_loss = []
        with tqdm(total=config.val_epoch_size) as progbar:
            print('Val epoch {}/{}'.format(epoch + 1, config.epoches))
            for i, (dataset_rgb,
                    dataset_ir) in enumerate(zip(val_loader_rgb,
                                                 val_loader_ir)):

                val_closs, val_rloss, val_tloss = model.step(epoch,
                                                             dataset_rgb,
                                                             dataset_ir,
                                                             anchors,
                                                             epoch,
                                                             train=False)

                closs_ = val_closs.cpu().item()

                if np.isnan(closs_):
                    sys.exit(0)

                val_closses.update(val_closs.cpu().item())
                val_rlosses.update(val_rloss.cpu().item())
                val_tlosses.update(val_tloss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(val_closses.avg),
                                    rloss='{:05.5f}'.format(val_rlosses.avg),
                                    tloss='{:05.3f}'.format(val_tlosses.avg))

                progbar.update()

                val_loss.append(val_tlosses.avg)

                if i >= config.val_epoch_size - 1:
                    break

        val_loss = np.mean(val_loss)
        val_plot.update(val_loss)
        val_losslist.append(val_loss)
        print('Train loss: {}, val loss: {}'.format(train_loss, val_loss))
        # Write the loss record into the experiment directory itself; the
        # original only created its *parent* (os.path.dirname), which could
        # leave exp_name_dir missing when np.savetxt runs.
        if not os.path.isdir(exp_name_dir):
            os.makedirs(exp_name_dir)
        record_file = os.path.join(exp_name_dir, 'val_losses.txt')
        np.savetxt(record_file, val_losslist, fmt='%.3f', delimiter=',')
Exemplo n.º 23
0
                index = random.choice(range(3000, 8000))
                print("index in self.index")

            if not index in self.index:
                self.index.append(index)
            if len(self.index) >= 3000:
                self.index = []
        else:
            index = random.choice(range(len(self.sub_class_dir)))'''

        if self.name == 'GOT-10k':
            if  index == 4418 or index == 8627 or index == 8629 or index == 9057 or index == 9058 or index==7787:
                index += 3
        self._pick_img_pairs(index)
        self.open()
        self._tranform()
        regression_target, conf_target = self._target()
        self.count += 1

        return self.ret['train_z_transforms'], self.ret['train_x_transforms'], regression_target, conf_target.astype(np.int64)

    def __len__(self):
        """Samples per epoch: ``train_epoch_size`` batches of 64 each."""
        batch_size = 64
        return batch_size * config.train_epoch_size

if __name__ == "__main__":
    # Smoke test: build the loader over the GOT-10k train split and
    # fetch a single sample by index.
    dataset = GOT10k('./data/GOT-10k', subset='train')
    loader = TrainDataLoader(dataset)
    loader[180]
Exemplo n.º 24
0
from tqdm import tqdm
from got10k.datasets import ImageNetVID, GOT10k
from pairwise import Pairwise
from siamfc import TrackerSiamFC
from got10k.experiments import *
import numpy as np

from config import config

if __name__ == '__main__':

    # setup the desired dataset for training
    # One of 'GOT-10k', 'VID', or 'All' (GOT-10k + ImageNet VID combined).
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        seq_dataset = GOT10k(config.GOT_10k_dataset_directory, subset='train')
        pair_dataset = Pairwise(seq_dataset)
    elif name == 'VID':
        seq_dataset = ImageNetVID(config.Imagenet_dataset_directory,
                                  subset=('train', 'val'))
        pair_dataset = Pairwise(seq_dataset)
    elif name == 'All':
        seq_got_dataset = GOT10k(config.GOT_10k_dataset_directory,
                                 subset='train')
        seq_vid_dataset = ImageNetVID(config.Imagenet_dataset_directory,
                                      subset=('train', 'val'))
        # Concatenate pairwise views over both datasets.
        pair_dataset = Pairwise(seq_got_dataset) + Pairwise(seq_vid_dataset)

    # Number of training pairs available.
    print(len(pair_dataset))

    # setup the data loader
Exemplo n.º 25
0
    return img


def gen_pos(bbox):
    """Convert an (x, y, w, h) box into two opposite corner points.

    Returns ``((x_min, y_max), (x_max, y_min))`` with every coordinate
    rounded to the nearest integer (numpy rounding rules).
    """
    left, top, width, height = bbox
    corner_a = (np.round(left).astype(int),
                np.round(top + height).astype(int))
    corner_b = (np.round(left + width).astype(int),
                np.round(top).astype(int))

    return corner_a, corner_b


# Compare saved tracker results against the GOT-10k test split, one
# sequence at a time.
dataset = GOT10k(root_dir='data/GOT-10k', subset='test')
# Tracker names; each must have result files under ./results/GOT-10k/<name>/.
trackers = ['Ours+AC+DKLm6', 'SiamRPN']
# indexing
img_file, _ = dataset[10]

# for-loop
for s, (img_files, anno) in enumerate(dataset):
    results = []
    seq_name = dataset.seq_names[s]
    print('Sequence:', seq_name)

    # Load each tracker's predicted boxes for this sequence (CSV rows).
    for t in range(len(trackers)):
        anno_path = "./results/GOT-10k/{0}/{1}/{2}_001.txt".format(
            trackers[t], seq_name, seq_name)
        results.append(np.loadtxt(anno_path, delimiter=',', dtype=float))
    # show all frames
Exemplo n.º 26
0
from tqdm import tqdm
from got10k.datasets import ImageNetVID, GOT10k
from pairwise_guass import Pairwise
#from pairwise import Pairwise
from siamfc import TrackerSiamFC
from got10k.experiments import *

from config import config

if __name__ == '__main__':

    # setup dataset
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        seq_dataset = GOT10k(config.root_dir_for_GOT_10k, subset='train')
        pair_dataset = Pairwise(seq_dataset)
    elif name == 'VID':
        seq_dataset = ImageNetVID(config.root_dir_for_VID, subset=('train', 'val'))
        pair_dataset = Pairwise(seq_dataset)
    elif name == 'All':
        seq_got_dataset = GOT10k(config.root_dir_for_GOT_10k, subset='train')
        seq_vid_dataset = ImageNetVID(config.root_dir_for_VID, subset=('train', 'val'))
        pair_dataset = Pairwise(seq_got_dataset) + Pairwise(seq_vid_dataset)

    print(len(pair_dataset))

    # setup data loader
    cuda = torch.cuda.is_available()
    loader = DataLoader(pair_dataset,
                        batch_size = config.batch_size,
Exemplo n.º 27
0
            if not index in self.index:
                self.index.append(index)
            if len(self.index) >= 3000:
                self.index = []
        else:
            index = random.choice(range(len(self.sub_class_dir)))'''

        if self.name == 'GOT-10k':
            if index == 4418 or index == 4419 or index == 8627 or index == 8629 or index == 9057 or index == 9058:
                index += 3
        self._pick_img_pairs(index)
        self.open()
        self._tranform()
        regression_target, conf_target = self._target()
        self.count += 1

        return self.ret['train_z_transforms'], self.ret[
            'train_x_transforms'], regression_target, conf_target.astype(
                np.int64)

    def __len__(self):
        """Dataset length: ``train_epoch_size`` batches of 64 samples."""
        per_batch = 64
        return config.train_epoch_size * per_batch


if __name__ == "__main__":

    # Smoke test: build the loader over the GOT-10k val split and pull a
    # single training sample by index.
    root_dir = '/Users/arbi/Desktop'
    seq_dataset = GOT10k(root_dir, subset='val')
    train_data = TrainDataLoader(seq_dataset)
    train_data.__getitem__(180)
Exemplo n.º 28
0
def train(data_dir, net_path=None):
    """Train SiamRPN on the GOT-10k train split.

    Args:
        data_dir: root directory of the GOT-10k dataset.
        net_path: optional checkpoint path to resume from. A dict with
            'epoch'/'model'/'optimizer' keys resumes fully; otherwise the
            file is treated as a bare state_dict.
    """
    seq_dataset_train = GOT10k(data_dir, subset='train')
    """定义数据增强(图像预处理):归一化、转化为Tensor"""
    train_z_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225))
    ])
    train_x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225))
    ])
    """建立训练数据"""
    train_dataset = GOT10kDataset(seq_dataset_train, train_z_transforms,
                                  train_x_transforms)
    anchors = train_dataset.anchors  # anchor boxes, shape (1805, 4)
    """加载训练数据"""
    trainloader = DataLoader(dataset=train_dataset,
                             batch_size=config.train_batch_size,
                             shuffle=True,
                             num_workers=config.train_num_workers,
                             pin_memory=True,
                             drop_last=True)
    """"————————————开始训练——————————————————————"""
    model = SiamRPNNet()
    model = model.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=config.lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)
    start_epoch = 1
    # Resume training from a checkpoint if one was given.
    if net_path:
        print("loading checkpoint %s" % net_path + '\n')
        print(
            '------------------------------------------------------------------------------------------------ \n'
        )
        checkpoint = torch.load(net_path)
        if 'epoch' in checkpoint:
            # Full checkpoint: restore epoch, model and optimizer state.
            start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            # Bare state_dict only.
            model.load_state_dict(checkpoint)
        del checkpoint
        torch.cuda.empty_cache()  # release cached GPU memory
        print("loaded checkpoint")

    for epoch in range(start_epoch, config.epoch + 1):
        train_loss = []
        model.train()  # switch to training mode

        loss_temp_cls = 0  # running classification loss over show_interval
        loss_temp_reg = 0  # running regression loss over show_interval
        for i, data in enumerate(tqdm(trainloader)):
            # Template, search image, regression target, classification
            # target: [16,3,127,127] [16,3,271,271] [16,1805,4] [16,1805]
            exemplar_imgs, instance_imgs, regression_target, conf_target = data
            regression_target, conf_target = regression_target.cuda(
            ), conf_target.cuda()
            """---将模板和搜索图像输入net,得到回归参数和分类分数---"""
            #score.shape=[8,10,19,19],regression.shape=[8,20,19,19]
            pred_score, pred_regression = model(exemplar_imgs.cuda(),
                                                instance_imgs.cuda())
            #pre_conf.shape=(8,1805,2)
            pred_conf = pred_score.reshape(
                -1, 2, config.anchor_num * config.score_size *
                config.score_size).permute(0, 2, 1)
            #pred_offset.shape=[16,1805,4]
            pred_offset = pred_regression.reshape(
                -1, 4, config.anchor_num * config.score_size *
                config.score_size).permute(0, 2, 1)
            """——————————————计算分类和回归损失————————————————————-"""
            cls_loss = rpn_cross_entropy_balance(pred_conf,
                                                 conf_target,
                                                 config.num_pos,
                                                 config.num_neg,
                                                 anchors,
                                                 nms_pos=config.nms_pos,
                                                 nms_neg=config.nms_neg)
            reg_loss = rpn_smoothL1(pred_offset,
                                    regression_target,
                                    conf_target,
                                    config.num_pos,
                                    nms_reg=config.nms_reg)
            loss = cls_loss + config.loss_weight * reg_loss  # cls:reg weight ratio 1:5
            """——————————————————————————————————————————————"""
            """--------优化三件套---------------------------"""
            optimizer.zero_grad()
            loss.backward()
            # config.clip = 10; clip the gradient norm to avoid explosion
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip)
            optimizer.step()
            """-----------------------------------------"""
            # Detached from the graph (requires_grad=False) before collecting.
            train_loss.append(loss.detach().cpu())
            # Read scalar tensor values via .cpu().numpy() (or .item()).
            loss_temp_cls += cls_loss.detach().cpu().numpy()
            loss_temp_reg += reg_loss.detach().cpu().numpy()

            if (i + 1) % config.show_interval == 0:
                print(
                    "[epoch %2d][iter %4d] cls_loss: %.4f, reg_loss: %.4f lr: %.2e"
                    % (epoch, i, loss_temp_cls / config.show_interval,
                       loss_temp_reg / config.show_interval,
                       optimizer.param_groups[0]['lr']))
                loss_temp_cls = 0
                loss_temp_reg = 0
        train_loss = np.mean(train_loss)
        print("EPOCH %d  train_loss: %.4f" % (epoch, train_loss))
        # Adjust lr before saving: the checkpoint then carries the next
        # epoch's lr so a resumed run continues the schedule correctly.
        adjust_learning_rate(optimizer, config.gamma)
        if epoch % config.save_interval == 0:
            if not os.path.exists('./pretrained/'):
                os.makedirs("./pretrained/")
            save_name = "./pretrained/siamrpn_{}.pth".format(epoch)

            if torch.cuda.device_count() > 1:  # multi-GPU (DataParallel): unwrap .module
                new_state_dict = model.module.state_dict()
            else:  # single GPU
                new_state_dict = model.state_dict()
            torch.save(
                {
                    'epoch': epoch,
                    'model': new_state_dict,
                    'optimizer': optimizer.state_dict(),
                }, save_name)
            print('save model: {}'.format(save_name))
Exemplo n.º 29
0
def main():
    """Train a SiamRPN tracker.

    Parses CLI args, builds train/val sequence loaders (GOT-10k by
    default; VID or a combined split selectable via the hard-coded
    ``name`` variable), optionally restores weights from a checkpoint or
    a pretrained backbone, then alternates train and validation epochs,
    saving the model and updating a train/val loss plot each epoch.
    """
    # Parameter initialization: CLI args and the experiment output dir.
    args = parser.parse_args()
    exp_name_dir = util.experiment_name_dir(args.experiment_name)
    '''model on gpu'''
    model = TrackerSiamRPN()
    '''setup train data loader'''
    # Dataset selection is hard-coded; the assert documents legal values.
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset = GOT10k(root_dir, subset='train')
    elif name == 'VID':
        # NOTE(review): VID root is a hard-coded absolute path — verify it
        # exists on the target machine before selecting this branch.
        root_dir = '/store_ssd/ILSVRC'
        seq_dataset = ImageNetVID(root_dir, subset=('train'))
    elif name == 'All':
        # Merge VID and GOT-10k sequences into one training set.
        root_dir_vid = '/store_ssd/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('train'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='train')
        seq_dataset = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset', len(seq_dataset))

    # Template (z) frames only need tensor conversion; search (x) frames
    # additionally get random crop + scale augmentation during training.
    train_z_transforms = transforms.Compose([ToTensor()])
    train_x_transforms = transforms.Compose([
        RandomCrop([config.detection_img_size, config.detection_img_size],
                   config.max_translate),
        RandomScale(config.scale_resize),
        ToTensor()
    ])

    train_data = TrainDataLoader(seq_dataset, train_z_transforms,
                                 train_x_transforms, name)
    # Anchor boxes generated by the loader; reused by every train/val step.
    anchors = train_data.anchors
    train_loader = DataLoader(dataset=train_data,
                              batch_size=config.train_batch_size,
                              shuffle=True,
                              num_workers=config.train_num_workers,
                              pin_memory=True)
    '''setup val data loader'''
    # Same dataset-selection logic as above, but on the 'val' subset and
    # without augmentation.
    name = 'GOT-10k'
    assert name in ['VID', 'GOT-10k', 'All']
    if name == 'GOT-10k':
        root_dir = args.train_path
        seq_dataset_val = GOT10k(root_dir, subset='val')
    elif name == 'VID':
        root_dir = '/store_ssd/ILSVRC'
        seq_dataset_val = ImageNetVID(root_dir, subset=('val'))
    elif name == 'All':
        root_dir_vid = '/store_ssd/ILSVRC'
        seq_datasetVID = ImageNetVID(root_dir_vid, subset=('val'))
        root_dir_got = args.train_path
        seq_datasetGOT = GOT10k(root_dir_got, subset='val')
        seq_dataset_val = util.data_split(seq_datasetVID, seq_datasetGOT)
    print('seq_dataset_val', len(seq_dataset_val))

    valid_z_transforms = transforms.Compose([ToTensor()])
    valid_x_transforms = transforms.Compose([ToTensor()])

    val_data = TrainDataLoader(seq_dataset_val, valid_z_transforms,
                               valid_x_transforms, name)
    val_loader = DataLoader(dataset=val_data,
                            batch_size=config.valid_batch_size,
                            shuffle=False,
                            num_workers=config.valid_num_workers,
                            pin_memory=True)
    '''load weights'''

    # Resume path: only taken when a checkpoint dir is given AND we are
    # resuming from a positive epoch index.
    # NOTE(review): `not args.checkpoint_path == None` works but
    # `args.checkpoint_path is not None` is the idiomatic comparison.
    if not args.checkpoint_path == None and args.epoch_i > 0:
        checkpoint_path = os.path.join(args.checkpoint_path,
                                       'model_e{}.pth'.format(args.epoch_i))
        assert os.path.isfile(
            checkpoint_path), '{} is not valid checkpoint_path'.format(
                checkpoint_path)

        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        # Accept both wrapped ({'model': state_dict, ...}) and bare
        # state_dict checkpoint formats.
        # NOTE(review): the file is re-loaded from disk in both branches
        # even though `checkpoint` above already holds its contents.
        if 'model' in checkpoint.keys():
            model.net.load_state_dict(
                torch.load(checkpoint_path, map_location='cpu')['model'])
        else:
            model.net.load_state_dict(
                torch.load(checkpoint_path, map_location='cpu'))
        torch.cuda.empty_cache()
        print('You are loading the model.load_state_dict')

    elif config.pretrained_model:
        # Fresh start from a pretrained backbone: remap the backbone's
        # parameter names onto this model's feature extractor, then merge
        # into the current state dict so unmatched params keep init values.
        checkpoint = torch.load(config.pretrained_model)
        # change name and load parameters
        checkpoint = {
            k.replace('features.features', 'featureExtract'): v
            for k, v in checkpoint.items()
        }
        model_dict = model.net.state_dict()
        model_dict.update(checkpoint)
        model.net.load_state_dict(model_dict)
        #torch.cuda.empty_cache()
        print('You are loading the pretrained model')
    '''train phase'''
    # Running averages for classification / regression / total losses,
    # tracked separately for train and val.
    train_closses, train_rlosses, train_tlosses = AverageMeter(), AverageMeter(
    ), AverageMeter()
    val_closses, val_rlosses, val_tlosses = AverageMeter(), AverageMeter(
    ), AverageMeter()

    train_val_plot = SavePlot(exp_name_dir, 'train_val_plot')
    # Bring the LR schedule in line with the epoch we are resuming from.
    model.adjust_lr(args.epoch_i)

    for epoch in range(args.epoch_i, config.epoches):
        model.net.train()
        # Optionally keep the first backbone layers frozen each epoch
        # (re-applied here because .train() may reset module modes).
        if config.fix_former_3_layers:
            util.freeze_layers(model.net)
        print('Train epoch {}/{}'.format(epoch + 1, config.epoches))
        train_loss = []
        with tqdm(total=config.train_epoch_size) as progbar:
            for i, dataset in enumerate(train_loader):

                closs, rloss, loss = model.step(epoch,
                                                dataset,
                                                anchors,
                                                i,
                                                train=True)

                closs_ = closs.cpu().item()

                # Abort on NaN classification loss (training has diverged).
                # NOTE(review): exits with status 0; a non-zero exit code
                # would let callers detect the failure.
                if np.isnan(closs_):
                    sys.exit(0)

                train_closses.update(closs.cpu().item())
                train_rlosses.update(rloss.cpu().item())
                train_tlosses.update(loss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(train_closses.avg),
                                    rloss='{:05.5f}'.format(train_rlosses.avg),
                                    tloss='{:05.3f}'.format(train_tlosses.avg))

                progbar.update()
                # Appends the running average (not the per-batch loss), so
                # the epoch mean below is a mean of running averages.
                train_loss.append(train_tlosses.avg)

                # Epoch is capped at train_epoch_size batches; save the
                # model just before leaving the loop.
                if i >= config.train_epoch_size - 1:
                    '''save model'''
                    model.save(model, exp_name_dir, epoch)

                    break

        train_loss = np.mean(train_loss)
        '''val phase'''
        val_loss = []
        with tqdm(total=config.val_epoch_size) as progbar:
            print('Val epoch {}/{}'.format(epoch + 1, config.epoches))
            for i, dataset in enumerate(val_loader):

                # NOTE(review): unlike the train step, no batch index `i`
                # is passed here — confirm model.step's signature accepts
                # this positional form for train=False.
                val_closs, val_rloss, val_tloss = model.step(epoch,
                                                             dataset,
                                                             anchors,
                                                             train=False)

                closs_ = val_closs.cpu().item()

                if np.isnan(closs_):
                    sys.exit(0)

                val_closses.update(val_closs.cpu().item())
                val_rlosses.update(val_rloss.cpu().item())
                val_tlosses.update(val_tloss.cpu().item())

                progbar.set_postfix(closs='{:05.3f}'.format(val_closses.avg),
                                    rloss='{:05.5f}'.format(val_rlosses.avg),
                                    tloss='{:05.3f}'.format(val_tlosses.avg))

                progbar.update()

                val_loss.append(val_tlosses.avg)

                if i >= config.val_epoch_size - 1:
                    break

        val_loss = np.mean(val_loss)
        # Append this epoch's mean train/val losses to the saved plot.
        train_val_plot.update(train_loss, val_loss)
        print('Train loss: {}, val loss: {}'.format(train_loss, val_loss))