Example #1
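This PyTracking LTR settings file is indexed without its import block. A likely reconstruction, assuming the standard pytracking repo layout (module paths unverified against the exact version), is:

# Assumed imports (PyTracking LTR framework); paths may differ across versions
import torch.nn as nn
import torch.optim as optim

import ltr.data.transforms as tfm
import ltr.models.bbreg.atom as atom_models
from ltr import actors
from ltr.data import processing, sampler, LTRLoader
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq
from ltr.trainers import LTRTrainer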
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet with default settings according to the paper.'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    trackingnet_val = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11,12)))

    # The joint augmentation transform, applied to the image pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      transform=transform_train,
                                                      joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
                                                    output_sz=settings.output_sz,
                                                    center_jitter_factor=settings.center_jitter_factor,
                                                    scale_jitter_factor=settings.scale_jitter_factor,
                                                    mode='sequence',
                                                    proposal_params=proposal_params,
                                                    transform=transform_val,
                                                    joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1, 1, 1],
                                        samples_per_epoch=1000 * settings.batch_size, max_gap=50,
                                        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500 * settings.batch_size,
                                      max_gap=50, processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = nn.MSELoss()
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)
Example #2
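This variant targets a PaddlePaddle (fluid dygraph) port of the LTR framework. The block below is a guess at the imports the function needs, based only on the identifiers it uses; none of the paths are verified against the original project:

# Assumed imports for a PaddlePaddle port of LTR; all module paths are guesses
import paddle.fluid as fluid
from paddle.fluid import dygraph

import ltr.data.transforms as dltransforms
from ltr import actors
from ltr.data import processing, sampler, loader
from ltr.dataset import ImagenetVID, Lasot, MSCOCOSeq, Got10k
from ltr.models.bbreg.atom import atom_resnet18
from ltr.trainers import LTRTrainer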
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet with ResNet18 backbone and trained with vid, lasot, coco.'
    settings.print_interval = 1  # How often to print loss and other info
    settings.batch_size = 64  # Batch size
    settings.num_workers = 4  # Number of workers for image loading
    settings.normalize_mean = [0.485, 0.456, 0.406]  # Normalize mean (default ImageNet values)
    settings.normalize_std = [0.229, 0.224, 0.225]  # Normalize std (default ImageNet values)
    settings.search_area_factor = 5.0  # Image patch size relative to target size
    settings.feature_sz = 18  # Size of feature map
    settings.output_sz = settings.feature_sz * 16  # Size of input image patches

    # Settings for the image sample and proposal generation
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
    settings.proposal_params = {
        'min_iou': 0.1,
        'boxes_per_frame': 16,
        'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]
    }

    # Train datasets
    vid_train = ImagenetVID()
    lasot_train = Lasot(split='train')
    coco_train = MSCOCOSeq()

    # Validation datasets
    got10k_val = Got10k(split='val')

    # The joint augmentation transform, applied to the image pairs jointly
    transform_joint = dltransforms.ToGrayscale(probability=0.05)

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = dltransforms.Compose([
        dltransforms.ToArrayAndJitter(0.2),
        dltransforms.Normalize(mean=settings.normalize_mean,
                               std=settings.normalize_std)
    ])

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = dltransforms.Compose([
        dltransforms.ToArray(),
        dltransforms.Normalize(mean=settings.normalize_mean,
                               std=settings.normalize_std)
    ])

    # Data processing to do on the training pairs
    data_processing_train = processing.ATOMProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=settings.proposal_params,
        transform=transform_train,
        joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=settings.proposal_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler(
        [vid_train, lasot_train, coco_train], [1, 1, 1],
        samples_per_epoch=1000 * settings.batch_size,
        max_gap=50,
        processing=data_processing_train)

    # The loader for training
    train_loader = loader.LTRLoader('train',
                                    dataset_train,
                                    training=True,
                                    batch_size=settings.batch_size,
                                    num_workers=4,
                                    stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1],
                                      samples_per_epoch=500 * settings.batch_size,
                                      max_gap=50,
                                      processing=data_processing_val)

    # The loader for validation
    val_loader = loader.LTRLoader('val',
                                  dataset_val,
                                  training=False,
                                  batch_size=settings.batch_size,
                                  epoch_interval=5,
                                  num_workers=4,
                                  stack_dim=1)

    # Create network, set objective, create optimizer, learning-rate scheduler and trainer
    with dygraph.guard():
        # Create network
        net = atom_resnet18(backbone_pretrained=True)

        # Freeze backbone
        state_dicts = net.state_dict()
        for k in state_dicts.keys():
            if 'feature_extractor' in k and "running" not in k:
                state_dicts[k].stop_gradient = True

        # Set objective
        objective = fluid.layers.square_error_cost

        # Create actor, which wraps network and objective
        actor = actors.AtomActor(net=net, objective=objective)

        # Set to training mode
        actor.train()

        # Define the learning-rate schedule and optimizer
        gamma = 0.2
        lr = 1e-3
        lr_scheduler = fluid.dygraph.PiecewiseDecay(
            [15, 30, 45],
            values=[lr, lr * gamma, lr * gamma * gamma],
            step=1000,
            begin=0)

        optimizer = fluid.optimizer.Adam(
            parameter_list=net.bb_regressor.parameters(),
            learning_rate=lr_scheduler)

        trainer = LTRTrainer(actor, [train_loader, val_loader], optimizer,
                             settings, lr_scheduler)
        trainer.train(40, load_latest=False, fail_safe=False)
Example #3
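The FCOT settings below also arrive without imports. Based on the identifiers used and FCOT's pytracking-style layout, something like the following is needed; the loss and trainer paths in particular are assumptions:

# Assumed imports for the FCOT repo (pytracking-style layout); several paths are guesses
import collections

import torch
import torch.optim as optim
import torchvision

import ltr.data.transforms as dltransforms
import ltr.models.loss as ltr_losses
import ltr.models.tracking.fcotnet as fcotnet
from ltr import actors, MultiGPU
from ltr.data import processing_fcot, sampler, LTRLoader
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.models.loss.target_regression import REGLoss  # path assumed
from ltr.trainers import LTRFcotTrainer  # path assumed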
def run(settings):
    settings.description = 'Default train settings for FCOT with ResNet50 as backbone.'
    settings.multi_gpu = True
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1 / 4
    settings.clf_target_filter_sz = 4
    settings.reg_target_filter_sz = 3
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    settings.logging_file = 'fcot_log.txt'

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir,
                                    set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = dltransforms.ToGrayscale(probability=0.05)

    transform_train = torchvision.transforms.Compose([
        dltransforms.ToTensorAndJitter(0.2),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])

    transform_val = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])

    # The tracking pairs processing module
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {
        'min_iou': 0.1,
        'boxes_per_frame': 8,
        'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]
    }
    label_params = {
        'feature_sz': settings.feature_sz,
        'sigma_factor': output_sigma,
        'kernel_sz': settings.clf_target_filter_sz
    }
    data_processing_train = processing_fcot.AnchorFreeProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        output_spatial_scale=72 / 288.,
        proposal_params=proposal_params,
        label_function_params=label_params,
        transform=transform_train,
        joint_transform=transform_joint)

    data_processing_val = processing_fcot.AnchorFreeProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        output_spatial_scale=72 / 288.,
        proposal_params=proposal_params,
        label_function_params=label_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.FCOTSampler(
        [lasot_train, got10k_train, trackingnet_train, coco_train],
        [settings.lasot_rate, 1, 1, 1],
        samples_per_epoch=settings.samples_per_epoch,
        max_gap=30,
        num_test_frames=3,
        num_train_frames=3,
        processing=data_processing_train)

    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True,
                             drop_last=True,
                             stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.FCOTSampler([got10k_val], [1],
                                      samples_per_epoch=5000,
                                      max_gap=30,
                                      num_test_frames=3,
                                      num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           batch_size=settings.batch_size,
                           epoch_interval=5,
                           num_workers=settings.num_workers,
                           shuffle=False,
                           drop_last=True,
                           stack_dim=1)

    # Create network
    net = fcotnet.fcotnet(
        clf_filter_size=settings.clf_target_filter_sz,
        reg_filter_size=settings.reg_target_filter_sz,
        backbone_pretrained=True,
        optim_iter=5,
        norm_scale_coef=settings.norm_scale_coef,
        clf_feat_norm=True,
        clf_feat_blocks=0,
        final_conv=True,
        out_feature_dim=512,
        optim_init_step=0.9,
        optim_init_reg=0.1,
        init_gauss_sigma=output_sigma * settings.feature_sz,
        num_dist_bins=100,
        bin_displacement=0.1,
        mask_init_factor=3.0,
        target_mask_act='sigmoid',
        score_act='relu',
        train_reg_optimizer=settings.train_reg_optimizer,
        train_cls_72_and_reg_init=settings.train_cls_72_and_reg_init,
        train_cls_18=settings.train_cls_18)

    # Load dimp-model as initial weights
    device = torch.device('cuda:{}'.format(settings.devices_id[0])
                          if torch.cuda.is_available() else 'cpu')
    if settings.use_pretrained_dimp:
        assert settings.pretrained_dimp50 is not None
        dimp50 = torch.load(settings.pretrained_dimp50, map_location=device)
        state_dict = collections.OrderedDict()
        for key, v in dimp50['net'].items():
            if key.split('.')[0] == 'feature_extractor':
                state_dict['.'.join(key.split('.')[1:])] = v

        net.feature_extractor.load_state_dict(state_dict)

        state_dict = collections.OrderedDict()
        for key, v in dimp50['net'].items():
            if key.split('.')[0] == 'classifier':
                state_dict['.'.join(key.split('.')[1:])] = v
        net.classifier_18.load_state_dict(state_dict)
        print("loading backbone and Classifier modules from DiMP50 done.")

    # Load fcot-model trained in the previous stage
    if settings.load_model:
        assert settings.fcot_model is not None
        load_dict = torch.load(settings.fcot_model)
        fcot_dict = net.state_dict()
        load_fcotnet_dict = {
            k: v
            for k, v in load_dict['net'].items() if k in fcot_dict
        }
        fcot_dict.update(load_fcotnet_dict)
        net.load_state_dict(fcot_dict)
        print("loading FCOT model done.")

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, device_ids=settings.devices_id, dim=1).to(device)

    # Loss for cls_72, cls_18 and regression
    objective = {
        'test_clf_72': ltr_losses.LBHinge(threshold=settings.hinge_threshold),
        'test_clf_18': ltr_losses.LBHinge(threshold=settings.hinge_threshold),
        'reg_72': REGLoss(dim=4)
    }

    # Create actor and adam-optimizer
    if settings.train_cls_72_and_reg_init and settings.train_cls_18:
        # Train the regression and classification branches jointly, except for the regression optimizer (TODO: fix)
        print("train cls_72, cls_18 and reg_init jointly...")
        loss_weight = {
            'test_clf_72': 100,
            'test_init_clf_72': 100,
            'test_iter_clf_72': 400,
            'test_clf_18': 100,
            'test_init_clf_18': 100,
            'test_iter_clf_18': 400,
            'reg_72': 1
        }
        actor = actors.FcotActor(net=net,
                                 objective=objective,
                                 loss_weight=loss_weight,
                                 device=device)
        optimizer = optim.Adam(
            [{
                'params':
                actor.net.classifier_72.filter_initializer.parameters(),
                'lr': 5e-5
            }, {
                'params':
                actor.net.classifier_72.filter_optimizer.parameters(),
                'lr': 5e-4
            }, {
                'params':
                actor.net.classifier_72.feature_extractor.parameters(),
                'lr': 5e-5
            }, {
                'params':
                actor.net.classifier_18.filter_initializer.parameters(),
                'lr': 5e-5
            }, {
                'params':
                actor.net.classifier_18.filter_optimizer.parameters(),
                'lr': 5e-4
            }, {
                'params':
                actor.net.classifier_18.feature_extractor.parameters(),
                'lr': 5e-5
            }, {
                'params': actor.net.regressor_72.parameters()
            }, {
                'params': actor.net.pyramid_first_conv.parameters()
            }, {
                'params': actor.net.pyramid_36.parameters()
            }, {
                'params': actor.net.pyramid_72.parameters()
            }, {
                'params': actor.net.feature_extractor.parameters(),
                'lr': 2e-5
            }],
            lr=2e-4)
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                      milestones=[35, 46, 60],
                                                      gamma=0.2)
    elif settings.train_cls_72_and_reg_init:
        # Setting of the first training stage: train backbone, cls_72 and regression (except for regression optimizer) branch.
        print("train cls_72 and reg_init...")
        loss_weight = {
            'test_clf_72': 100,
            'test_init_clf_72': 10,
            'test_iter_clf_72': 400,
            'test_clf_18': 0,
            'test_init_clf_18': 0,
            'test_iter_clf_18': 0,
            'reg_72': 0.3
        }
        actor = actors.FcotCls72AndRegInitActor(net=net,
                                                objective=objective,
                                                loss_weight=loss_weight,
                                                device=device)
        optimizer = optim.Adam(
            [{
                'params':
                actor.net.classifier_72.filter_initializer.parameters(),
                'lr': 5e-5
            }, {
                'params':
                actor.net.classifier_72.filter_optimizer.parameters(),
                'lr': 5e-4
            }, {
                'params':
                actor.net.classifier_72.feature_extractor.parameters(),
                'lr': 5e-5
            }, {
                'params': actor.net.regressor_72.parameters()
            }, {
                'params': actor.net.pyramid_first_conv.parameters()
            }, {
                'params': actor.net.pyramid_36.parameters()
            }, {
                'params': actor.net.pyramid_72.parameters()
            }, {
                'params': actor.net.feature_extractor.parameters(),
                'lr': 2e-5
            }],
            lr=2e-4)
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                      milestones=[35, 45, 69],
                                                      gamma=0.2)
    elif settings.train_reg_optimizer:
        # Setting of the second training stage: train regression optimizer.
        print("train regression optimizer...")
        loss_weight = {
            'test_reg_72': 1,
            'test_init_reg_72': 0,
            'test_iter_reg_72': 1
        }
        actor = actors.FcotOnlineRegressionActor(net=net,
                                                 objective=objective,
                                                 loss_weight=loss_weight,
                                                 device=device)
        optimizer = optim.Adam(
            [{
                'params': actor.net.regressor_72.filter_optimizer.parameters()
            }],
            lr=5e-4)
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                      milestones=[2],
                                                      gamma=0.2)
    elif settings.train_cls_18:
        print("train cls_18...")
        # Setting of the third training stage: train cls_18 branch.
        loss_weight = {
            'test_clf_18': 100,
            'test_init_clf_18': 100,
            'test_iter_clf_18': 400
        }
        actor = actors.FcotCls18Actor(net=net,
                                      objective=objective,
                                      loss_weight=loss_weight,
                                      device=device)
        optimizer = optim.Adam(
            [{
                'params':
                actor.net.classifier_18.filter_initializer.parameters(),
                'lr': 5e-5
            }, {
                'params':
                actor.net.classifier_18.filter_optimizer.parameters(),
                'lr': 5e-4
            }, {
                'params':
                actor.net.classifier_18.feature_extractor.parameters(),
                'lr': 5e-5
            }],
            lr=2e-4)
        lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                      milestones=[25],
                                                      gamma=0.2)
    else:
        # TODO: train jointly
        raise Exception("Please run training in correct way.")

    trainer = LTRFcotTrainer(actor, [loader_train, loader_val],
                             optimizer,
                             settings,
                             device,
                             lr_scheduler,
                             logging_file=settings.logging_file)

    trainer.train(settings.total_epochs, load_latest=True, fail_safe=True)
Example #4
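Another PaddlePaddle port, this time for SiamRPN. The import block is reconstructed from the names in the snippet and should be treated as a sketch; SiamRPN_AlexNet, the two loss functions and LinearLrWarmup are the least certain paths:

# Assumed imports for a PaddlePaddle SiamRPN port of LTR; most paths are guesses
import paddle.fluid as fluid
from paddle.fluid import dygraph
from paddle.fluid.dygraph import LinearLrWarmup

import ltr.data.transforms as dltransforms
from ltr import actors
from ltr.data import processing, sampler, loader
from ltr.dataset import YoutubeVOS, ImagenetVID, MSCOCOSeq, ImagenetDET
from ltr.models.siamese import SiamRPN_AlexNet  # path assumed
from ltr.models.loss import select_softmax_with_cross_entropy_loss, weight_l1_loss  # path assumed
from ltr.trainers import LTRTrainer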
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'SiamRPN with AlexNet backbone.'
    settings.print_interval = 100  # How often to print loss and other info
    settings.batch_size = 512  # Batch size
    settings.samples_per_epoch = 600000  # Number of training pairs per epoch
    settings.num_workers = 8  # Number of workers for image loading
    settings.search_area_factor = {'train': 1.0, 'test': 2.0}
    settings.output_sz = {'train': 127, 'test': 255}
    settings.scale_type = 'context'
    settings.border_type = 'meanpad'

    # Settings for the image sample and label generation
    settings.center_jitter_factor = {'train': 0.125, 'test': 2.0}
    settings.scale_jitter_factor = {'train': 0.05, 'test': 0.18}
    settings.label_params = {
        'search_size': 255,
        'output_size': 17,
        'anchor_stride': 8,
        'anchor_ratios': [0.33, 0.5, 1, 2, 3],
        'anchor_scales': [8],
        'num_pos': 16,
        'num_neg': 16,
        'num_total': 64,
        'thr_high': 0.6,
        'thr_low': 0.3
    }
    settings.loss_weights = {'cls': 1., 'loc': 1.2}
    settings.neg = 0.2

    # Train datasets
    vos_train = YoutubeVOS()
    vid_train = ImagenetVID()
    coco_train = MSCOCOSeq()
    det_train = ImagenetDET()
    #lasot_train = Lasot(split='train')
    #got10k_train = Got10k(split='train')

    # Validation datasets
    vid_val = ImagenetVID()

    # The joint augmentation transform, applied to the image pairs jointly
    transform_joint = dltransforms.ToGrayscale(probability=0.25)

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_exemplar = dltransforms.Transpose()
    transform_instance = dltransforms.Compose([
        dltransforms.Color(probability=1.0),
        dltransforms.Blur(probability=0.18),
        dltransforms.Transpose()
    ])
    transform_instance_mask = dltransforms.Transpose()

    # Data processing to do on the training pairs
    data_processing_train = processing.SiamProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        scale_type=settings.scale_type,
        border_type=settings.border_type,
        mode='sequence',
        label_params=settings.label_params,
        train_transform=transform_exemplar,
        test_transform=transform_instance,
        test_mask_transform=transform_instance_mask,
        joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.SiamProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        scale_type=settings.scale_type,
        border_type=settings.border_type,
        mode='sequence',
        label_params=settings.label_params,
        transform=transform_exemplar,
        joint_transform=transform_joint)

    nums_per_epoch = settings.samples_per_epoch // settings.batch_size
    # The sampler for training
    dataset_train = sampler.MaskSampler(
        [vid_train, coco_train, det_train, vos_train],
        [2, 1, 1, 2],
        samples_per_epoch=nums_per_epoch * settings.batch_size,
        max_gap=100,
        processing=data_processing_train,
        neg=settings.neg)

    # The loader for training
    train_loader = loader.LTRLoader(
        'train',
        dataset_train,
        training=True,
        batch_size=settings.batch_size,
        num_workers=settings.num_workers,
        stack_dim=0)

    # The sampler for validation
    dataset_val = sampler.MaskSampler(
        [vid_val],
        [1],
        samples_per_epoch=100 * settings.batch_size,
        max_gap=100,
        processing=data_processing_val)

    # The loader for validation
    val_loader = loader.LTRLoader(
        'val',
        dataset_val,
        training=False,
        batch_size=settings.batch_size,
        num_workers=settings.num_workers,
        stack_dim=0)

    # Create network, set objective, create optimizer, learning-rate scheduler and trainer
    with dygraph.guard():
        # Create network

        def scale_loss(loss):
            total_loss = 0
            for k in settings.loss_weights:
                total_loss += loss[k] * settings.loss_weights[k]
            return total_loss
        
        net = SiamRPN_AlexNet(scale_loss=scale_loss)

        # Define objective
        objective = {
            'cls': select_softmax_with_cross_entropy_loss,
            'loc': weight_l1_loss,
        }

        # Create actor, which wraps network and objective
        actor = actors.SiamActor(net=net, objective=objective)

        # Define optimizer and learning rate
        decayed_lr = fluid.layers.exponential_decay(
            learning_rate=0.01,
            decay_steps=nums_per_epoch,
            decay_rate=0.9407,
            staircase=True)
        lr_scheduler = LinearLrWarmup(
            learning_rate=decayed_lr,
            warmup_steps=5*nums_per_epoch,
            start_lr=0.005,
            end_lr=0.01)
        optimizer = fluid.optimizer.Adam(
            parameter_list=net.rpn_head.parameters(),
            learning_rate=lr_scheduler)

        trainer = LTRTrainer(actor, [train_loader, val_loader], optimizer, settings, lr_scheduler)
        trainer.train(50, load_latest=False, fail_safe=False)
Example #5
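The PrDiMP settings file in pytracking opens with imports along these lines (reconstructed from the identifiers; verify against the repo before use):

# Assumed imports (PyTracking LTR framework)
import torch.optim as optim

import ltr.actors.tracking as tracking_actors
import ltr.data.transforms as tfm
import ltr.models.loss.kl_regression as klreg_losses
import ltr.models.tracking.dimpnet as dimpnet
from ltr import MultiGPU
from ltr.data import processing, sampler, LTRLoader
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.trainers import LTRTrainer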
def run(settings):
    settings.description = 'Default train settings for PrDiMP with ResNet50 as backbone.'
    settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1/4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')


    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # The tracking pairs processing module
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz, 'normalize': True}

    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                        output_sz=settings.output_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        mode='sequence',
                                                        proposal_params=proposal_params,
                                                        label_function_params=label_params,
                                                        label_density_params=label_density_params,
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,
                                                      output_sz=settings.output_sz,
                                                      center_jitter_factor=settings.center_jitter_factor,
                                                      scale_jitter_factor=settings.scale_jitter_factor,
                                                      mode='sequence',
                                                      proposal_params=proposal_params,
                                                      label_function_params=label_params,
                                                      label_density_params=label_density_params,
                                                      transform=transform_val,
                                                      joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25, 1, 1, 1],
                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,
                                        processing=data_processing_train)

    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,
                                      num_test_frames=3, num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network and actor
    net = dimpnet.klcedimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,
                                clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,
                                optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,
                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05, normalize_label=True, init_initializer='zero')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}

    loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0}

    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)

    # Optimizer
    optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3},
                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},
                            {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)
Example #6
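For this LWL box-initialization file, a plausible import block under the pytracking layout is (the lwl_box_net path is the main assumption):

# Assumed imports (PyTracking LTR framework; LWL module paths reconstructed)
import torch.optim as optim

import ltr.actors.segmentation as lwtl_actors
import ltr.admin.loading as network_loading
import ltr.data.transforms as tfm
import ltr.models.lwl.lwl_box_net as lwt_box_networks  # path assumed
from ltr import MultiGPU
from ltr.data import processing, sampler, LTRLoader
from ltr.dataset import YouTubeVOS, MSCOCOSeq
from ltr.models.loss.segmentation import LovaszSegLoss
from ltr.trainers import LTRTrainer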
def run(settings):
    settings.description = 'Default train settings for training VOS with box initialization.'
    settings.batch_size = 8
    settings.num_workers = 4
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [102.9801, 115.9465, 122.7717]
    settings.normalize_std = [1.0, 1.0, 1.0]

    settings.feature_sz = (52, 30)
    settings.output_sz = (settings.feature_sz[0] * 16,
                          settings.feature_sz[1] * 16)
    settings.search_area_factor = 5.0
    settings.crop_type = 'inside_major'
    settings.max_scale_change = None
    settings.device = "cuda:0"
    settings.center_jitter_factor = {'train': 3, 'test': (5.5, 4.5)}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}

    settings.min_target_area = 500

    ytvos_train = YouTubeVOS(version="2019", multiobj=False, split='jjtrain')
    ytvos_valid = YouTubeVOS(version="2019", multiobj=False, split='jjvalid')
    coco_train = MSCOCOSeq()

    # Data transform
    transform_joint = tfm.Transform(tfm.ToBGR(),
                                    tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(
        tfm.ToTensorAndJitter(0.2, normalize=False),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    transform_val = tfm.Transform(
        tfm.ToTensorAndJitter(0.0, normalize=False),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    data_processing_train = processing.LWLProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        crop_type=settings.crop_type,
        max_scale_change=settings.max_scale_change,
        transform=transform_train,
        joint_transform=transform_joint,
        new_roll=True)

    data_processing_val = processing.LWLProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        crop_type=settings.crop_type,
        max_scale_change=settings.max_scale_change,
        transform=transform_val,
        joint_transform=transform_joint,
        new_roll=True)
    # Train sampler and loader
    dataset_train = sampler.LWLSampler([ytvos_train, coco_train], [1, 1],
                                       samples_per_epoch=settings.batch_size * 1000,
                                       max_gap=100,
                                       num_test_frames=1,
                                       num_train_frames=1,
                                       processing=data_processing_train)
    dataset_val = sampler.LWLSampler([ytvos_valid], [1],
                                     samples_per_epoch=settings.batch_size * 100,
                                     max_gap=100,
                                     num_test_frames=1,
                                     num_train_frames=1,
                                     processing=data_processing_val)

    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             num_workers=settings.num_workers,
                             stack_dim=1,
                             batch_size=settings.batch_size)
    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           num_workers=settings.num_workers,
                           epoch_interval=5,
                           stack_dim=1,
                           batch_size=settings.batch_size)

    net = lwt_box_networks.steepest_descent_resnet50(
        filter_size=3,
        num_filters=16,
        optim_iter=5,
        backbone_pretrained=True,
        out_feature_dim=512,
        frozen_backbone_layers=['conv1', 'bn1', 'layer1'],
        label_encoder_dims=(16, 32, 64),
        use_bn_in_label_enc=False,
        clf_feat_blocks=0,
        final_conv=True,
        backbone_type='mrcnn',
        box_label_encoder_dims=(64, 64),
        final_bn=False)

    base_net_weights = network_loading.load_trained_network(
        settings.env.workspace_dir,
        'ltr/lwl/lwl_stage2/LWTLNet_ep0080.pth.tar')

    # Copy weights
    net.feature_extractor.load_state_dict(
        base_net_weights.feature_extractor.state_dict())
    net.target_model.load_state_dict(
        base_net_weights.target_model.state_dict())
    net.decoder.load_state_dict(base_net_weights.decoder.state_dict())
    net.label_encoder.load_state_dict(
        base_net_weights.label_encoder.state_dict())

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {
        'segm': LovaszSegLoss(per_image=False),
    }

    loss_weight = {
        'segm': 100.0,
        'segm_box': 10.0,
        'segm_train': 10,
    }

    actor = lwtl_actors.LWLBoxActor(net=net,
                                    objective=objective,
                                    loss_weight=loss_weight)

    # Optimizer
    optimizer = optim.Adam([{'params': actor.net.box_label_encoder.parameters(), 'lr': 1e-3}],
                           lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=20,
                                             gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer,
                         settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)
Example #7
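This DiMP-18 variant mixes pytracking modules with torchvision transforms; a sketch of the imports it would need, with paths assumed from the pytracking layout:

# Assumed imports (pytracking-style DiMP variant using torchvision transforms)
import torch.nn as nn
import torch.optim as optim
import torchvision

import ltr.data.transforms as dltransforms
import ltr.models.loss as ltr_losses
import ltr.models.tracking.dimpnet as dimpnet
from ltr import actors, MultiGPU
from ltr.data import processing, sampler, LTRLoader
from ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq
from ltr.trainers import LTRTrainer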
def run(settings):
    settings.description = 'Default train settings for DiMP with ResNet18 as backbone.'
    settings.batch_size = 26
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = 1 / 4
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir,
                                    set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = dltransforms.ToGrayscale(probability=0.05)

    transform_train = torchvision.transforms.Compose([
        dltransforms.ToTensorAndJitter(0.2),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])

    transform_val = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])

    # The tracking pairs processing module
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {
        'min_iou': 0.1,
        'boxes_per_frame': 8,
        'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]
    }
    label_params = {
        'feature_sz': settings.feature_sz,
        'sigma_factor': output_sigma,
        'kernel_sz': settings.target_filter_sz
    }
    data_processing_train = processing.DiMPProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=proposal_params,
        label_function_params=label_params,
        transform=transform_train,
        joint_transform=transform_joint)

    data_processing_val = processing.DiMPProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=proposal_params,
        label_function_params=label_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler(
        [lasot_train, got10k_train, trackingnet_train, coco_train],
        [0.25, 1, 1, 1],
        samples_per_epoch=26000,
        max_gap=30,
        num_test_frames=3,
        num_train_frames=3,
        processing=data_processing_train)

    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True,
                             drop_last=True,
                             stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1],
                                      samples_per_epoch=5000,
                                      max_gap=30,
                                      num_test_frames=3,
                                      num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False,
                           drop_last=True,
                           epoch_interval=5,
                           stack_dim=1)

    # Create network and actor
    net = dimpnet.dimpnet18(filter_size=settings.target_filter_sz,
                            backbone_pretrained=True,
                            optim_iter=5,
                            clf_feat_norm=True,
                            final_conv=True,
                            optim_init_step=0.9,
                            optim_init_reg=0.1,
                            init_gauss_sigma=output_sigma * settings.feature_sz,
                            num_dist_bins=100,
                            bin_displacement=0.1,
                            mask_init_factor=3.0,
                            target_mask_act='sigmoid',
                            score_act='relu')

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {
        'iou': nn.MSELoss(),
        'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)
    }

    loss_weight = {
        'iou': 1,
        'test_clf': 100,
        'test_init_clf': 100,
        'test_iter_clf': 400
    }

    actor = actors.DiMPActor(net=net,
                             objective=objective,
                             loss_weight=loss_weight)

    # Optimizer
    optimizer = optim.Adam(
        [{
            'params': actor.net.classifier.filter_initializer.parameters(),
            'lr': 5e-5
        }, {
            'params': actor.net.classifier.filter_optimizer.parameters(),
            'lr': 5e-4
        }, {
            'params': actor.net.classifier.feature_extractor.parameters(),
            'lr': 5e-5
        }, {
            'params': actor.net.bb_regressor.parameters(),
            'lr': 1e-3
        }, {
            'params': actor.net.feature_extractor.parameters()
        }],
        lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=15,
                                             gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer,
                         settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)
Example #8
File: sep.py (project: danielism97/CFKD)
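The CFKD project builds on pytracking, so the imports below follow that layout; the distillation loss, weight-loading helper and distillation trainer paths are guesses from the identifiers alone:

# Assumed imports for the CFKD project (pytracking-style layout); distillation paths are guesses
import torch.nn as nn
import torch.optim as optim

import ltr.data.transforms as tfm
import ltr.models.bbreg.atom as atom_models
import ltr.models.loss.distillation as distillation  # path assumed
from ltr import actors
from ltr.admin import loading  # path assumed
from ltr.data import processing, sampler, LTRLoader
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq
from ltr.trainers import LTRDistillationTrainer  # path assumed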
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'distilled ATOM IoUNet with default settings according to the paper.'
    settings.batch_size = 32
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir,
                                    set_ids=list(range(11)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    trackingnet_val = TrackingNet(settings.env.trackingnet_dir,
                                  set_ids=list(range(11, 12)))

    # The joint augmentation transform, applied to the image pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(
        tfm.ToTensorAndJitter(0.2),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(
        tfm.ToTensor(),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {
        'min_iou': 0.1,
        'boxes_per_frame': 16,
        'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]
    }
    data_processing_train = processing.ATOMProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=proposal_params,
        transform=transform_train,
        joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=proposal_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler(
        [lasot_train, trackingnet_train, coco_train], [1, 1, 1],
        samples_per_epoch=1000 * settings.batch_size,
        max_gap=50,
        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True,
                             drop_last=True,
                             stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([trackingnet_val], [1],
                                      samples_per_epoch=500 * settings.batch_size,
                                      max_gap=50,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False,
                           drop_last=True,
                           epoch_interval=5,
                           stack_dim=1)

    # Load teacher network
    teacher_net = atom_models.atom_resnet18(backbone_pretrained=True)
    teacher_path = '/home/ddanier/CFKD/pytracking/networks/atom_default.pth'
    teacher_net = loading.load_weights(teacher_net, teacher_path, strict=True)
    print('*******************Teacher net loaded successfully*******************')

    # Create student network and actor
    student_net = atom_models.atom_mobilenetsmall(backbone_pretrained=False)

    ##########################################################
    ### Distil backbone first, turn off grad for regressor ###
    ##########################################################
    for p in student_net.bb_regressor.parameters():
        p.requires_grad_(False)

    objective = distillation.CFKDLoss(
        reg_loss=nn.MSELoss(),
        w_ts=0.,
        w_ah=0.,
        w_cf=0.01,
        w_fd=100.,
        cf_layers=['conv1', 'layer1', 'layer2', 'layer3'])
    actor = actors.AtomCompressionActor(student_net, teacher_net, objective)

    # Optimizer
    optimizer = optim.Adam(actor.student_net.feature_extractor.parameters(),
                           lr=1e-2)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=15,
                                             gamma=0.1)

    # Create trainer
    trainer = LTRDistillationTrainer(actor, [loader_train, loader_val],
                                     optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=False, fail_safe=True)

    ##########################################################
    ### Distil regressor next, turn off grad for backbone ###
    ##########################################################
    for p in trainer.actor.student_net.bb_regressor.parameters():
        p.requires_grad_(True)
    for p in trainer.actor.student_net.feature_extractor.parameters():
        p.requires_grad_(False)

    objective = distillation.CFKDLoss(reg_loss=nn.MSELoss(),
                                      w_ts=1.,
                                      w_ah=0.1,
                                      w_cf=0.,
                                      w_fd=0.)
    trainer.actor.objective = objective

    # Optimizer
    trainer.optimizer = optim.Adam(
        trainer.actor.student_net.bb_regressor.parameters(), lr=1e-2)

    trainer.lr_scheduler = optim.lr_scheduler.StepLR(trainer.optimizer,
                                                     step_size=15,
                                                     gamma=0.1)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(100, load_latest=False, fail_safe=True)
Example #9
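This last snippet is truncated by the aggregator; the imports below cover only the names visible in what remains (pytracking-style paths assumed):

# Assumed imports covering the visible names in this truncated snippet
import torchvision

import ltr.data.transforms as dltransforms
from ltr.data import processing, sampler, LTRLoader
from ltr.dataset import Lasot, TrackingNet, MSCOCOSeq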
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM IoUNet with default settings.'
    settings.print_interval = 1  # How often to print loss and other info
    settings.batch_size = 64  # Batch size
    settings.num_workers = 4  # Number of workers for image loading
    settings.normalize_mean = [0.485, 0.456, 0.406]  # Normalize mean (default pytorch ImageNet values)
    settings.normalize_std = [0.229, 0.224, 0.225]  # Normalize std (default pytorch ImageNet values)
    settings.search_area_factor = 5.0  # Image patch size relative to target size
    settings.feature_sz = 18  # Size of feature map
    settings.output_sz = settings.feature_sz * 16  # Size of input image patches

    # Settings for the image sample and proposal generation
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
    settings.proposal_params = {
        'min_iou': 0.1,
        'boxes_per_frame': 16,
        'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]
    }

    # Train datasets
    lasot_train = Lasot(split='train')
    trackingnet_train = TrackingNet(set_ids=list(range(11)))
    coco_train = MSCOCOSeq()

    # Validation datasets
    trackingnet_val = TrackingNet(set_ids=list(range(11, 12)))

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = dltransforms.ToGrayscale(probability=0.05)

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = torchvision.transforms.Compose([
        dltransforms.ToTensorAndJitter(0.2),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=settings.normalize_mean,
                                         std=settings.normalize_std)
    ])

    # Data processing to do on the training pairs
    data_processing_train = processing.ATOMProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=settings.proposal_params,
        transform=transform_train,
        joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.ATOMProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=settings.proposal_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler(
        [lasot_train, trackingnet_train, coco_train], [1, 1, 1],
        samples_per_epoch=1800 * settings.batch_size,
        max_gap=50,
        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True,
                             drop_last=True,
                             stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([trackingnet_val], [1],
                                      samples_per_epoch=500 *
                                      settings.batch_size,
                                      max_gap=50,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False,
                           drop_last=True,
                           epoch_interval=5,
                           stack_dim=1)

    # Create network
    net = atom_models.atom_resnet50(backbone_pretrained=True)
    # Set objective
    objective = nn.MSELoss()

    # Create actor, which wraps network and objective
    actor = actors.AtomActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)

    # Learning rate scheduler
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=15,
                                             gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer,
                         settings, lr_scheduler)

    # Run training (fail_safe=False here, so errors surface immediately while debugging)
    trainer.train(40, load_latest=True, fail_safe=False)
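
# For reference, the sampler sizes above translate directly into loader
# length: batches per epoch = samples_per_epoch / batch_size. With this
# example's numbers:
batch_size = 64
train_batches = (1800 * batch_size) // batch_size  # 1800 train batches per epoch
val_batches = (500 * batch_size) // batch_size     # 500 val batches, run only
                                                   # every 5th epoch (epoch_interval=5)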
Example #10
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'Siam selection for detection with default settings.'
    settings.print_interval = 1                                 # How often to print loss and other info
    settings.batch_size = 1                                    # Batch size
    assert settings.batch_size == 1, "only implemented for batch_size 1"
    settings.num_workers = 4                                    # Number of workers for image loading
    settings.normalize_mean = [0.485, 0.456, 0.406]             # Normalize mean (default pytorch ImageNet values)
    settings.normalize_std = [0.229, 0.224, 0.225]              # Normalize std (default pytorch ImageNet values)
    settings.search_area_factor = 5.0                           # Image patch size relative to target size
    settings.feature_sz = 18                                    # Size of feature map
    settings.output_sz = settings.feature_sz * 16               # Size of input image patches

    # Settings for the image sample and proposal generation
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
    settings.proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}

    # Train datasets
    lasot_train = Lasot(split='train')
    trackingnet_train = TrackingNet(set_ids=list(range(11)))
    coco_train = MSCOCOSeq()

    # Validation datasets
    trackingnet_val = TrackingNet(set_ids=list(range(11,12)))

    # # The joint augmentation transform, that is applied to the pairs jointly
    # transform_joint = dltransforms.ToGrayscale(probability=0.05)
    #
    # # The augmentation transform applied to the training set (individually to each image in the pair)
    # transform_train = torchvision.transforms.Compose([dltransforms.ToTensorAndJitter(0.2),
    #                                                   torchvision.transforms.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)])
    #
    # # The augmentation transform applied to the validation set (individually to each image in the pair)
    # transform_val = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
    #                                                 torchvision.transforms.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)])

    # Data processing to do on the training pairs
    # data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
    #                                                   output_sz=settings.output_sz,
    #                                                   center_jitter_factor=settings.center_jitter_factor,
    #                                                   scale_jitter_factor=settings.scale_jitter_factor,
    #                                                   mode='sequence',
    #                                                   proposal_params=settings.proposal_params,
    #                                                   transform=transform_train,
    #                                                   joint_transform=transform_joint)
    #
    # # Data processing to do on the validation pairs
    # data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
    #                                                 output_sz=settings.output_sz,
    #                                                 center_jitter_factor=settings.center_jitter_factor,
    #                                                 scale_jitter_factor=settings.scale_jitter_factor,
    #                                                 mode='sequence',
    #                                                 proposal_params=settings.proposal_params,
    #                                                 transform=transform_val,
    #                                                 joint_transform=transform_joint)

    img_transform = ImageTransform(size_divisor=32,
                                   mean=[123.675, 116.28, 103.53],
                                   std=[58.395, 57.12, 57.375],
                                   to_rgb=True)
    data_processing = processing.SiamSelProcessing(transform=img_transform)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=50*20,
                                        processing=data_processing)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50*20,
                                      processing=data_processing)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network
    # net = atom_models.atom_resnet18(backbone_pretrained=True)
    net = SiamSelNet()

    # Set objective
    objective = nn.BCEWithLogitsLoss()

    # Create actor, which wraps network and objective
    actor = actors.SiamSelActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.selector.parameters(), lr=1e-4, weight_decay=0.0001)

    # Learning rate scheduler
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.2)
    # lr_scheduler = WarmupMultiStepLR(optimizer,[50*1000,80*1000])


    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(100, load_latest=True, fail_safe=True)

# larger frame gap (max_gap=50*20)
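
# Note that ImageTransform above normalizes in 0-255 pixel scale, whereas
# the commented-out torchvision pipeline normalizes 0-1 tensors. Both use
# the same ImageNet statistics, just rescaled:
mean_01 = [0.485, 0.456, 0.406]
std_01 = [0.229, 0.224, 0.225]
mean_255 = [round(m * 255, 3) for m in mean_01]  # [123.675, 116.28, 103.53]
std_255 = [round(s * 255, 3) for s in std_01]    # [58.395, 57.12, 57.375]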
Example #11
def run(settings):
    settings.description = 'Transformer-assisted tracker. Our baseline approach is SuperDiMP'
    settings.batch_size = 40
    settings.num_workers = 8
    settings.multi_gpu = True
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 6.0
    settings.output_sigma_factor = 1 / 4
    settings.target_filter_sz = 4
    settings.feature_sz = 22
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 3, 'test': 5.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir,
                                    set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(
        tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip(probability=0.5),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    transform_val = tfm.Transform(
        tfm.ToTensor(),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    # The tracking pairs processing module
    output_sigma = settings.output_sigma_factor / settings.search_area_factor
    proposal_params = {
        'boxes_per_frame': 128,
        'gt_sigma': (0.05, 0.05),
        'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]
    }
    label_params = {
        'feature_sz': settings.feature_sz,
        'sigma_factor': output_sigma,
        'kernel_sz': settings.target_filter_sz
    }
    label_density_params = {
        'feature_sz': settings.feature_sz,
        'sigma_factor': output_sigma,
        'kernel_sz': settings.target_filter_sz
    }

    data_processing_train = processing.KLDiMPProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        crop_type='inside_major',
        max_scale_change=1.5,
        mode='sequence',
        proposal_params=proposal_params,
        label_function_params=label_params,
        label_density_params=label_density_params,
        transform=transform_train,
        joint_transform=transform_joint)

    data_processing_val = processing.KLDiMPProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        crop_type='inside_major',
        max_scale_change=1.5,
        mode='sequence',
        proposal_params=proposal_params,
        label_function_params=label_params,
        label_density_params=label_density_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # Train sampler and loader
    dataset_train = sampler.DiMPSampler(
        [lasot_train, got10k_train, trackingnet_train, coco_train],
        [1, 1, 1, 1],
        samples_per_epoch=50000,
        max_gap=500,
        num_test_frames=3,
        num_train_frames=3,
        processing=data_processing_train)

    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True,
                             drop_last=True,
                             stack_dim=1)

    # Validation samplers and loaders
    dataset_val = sampler.DiMPSampler([got10k_val], [1],
                                      samples_per_epoch=10000,
                                      max_gap=500,
                                      num_test_frames=3,
                                      num_train_frames=3,
                                      processing=data_processing_val)

    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False,
                           drop_last=True,
                           epoch_interval=5,
                           stack_dim=1)

    # Create network and actor
    net = dimpnet.dimpnet50(
        filter_size=settings.target_filter_sz,
        backbone_pretrained=True,
        optim_iter=5,
        clf_feat_norm=True,
        clf_feat_blocks=0,
        final_conv=True,
        out_feature_dim=512,
        optim_init_step=0.9,
        optim_init_reg=0.1,
        init_gauss_sigma=output_sigma * settings.feature_sz,
        num_dist_bins=100,
        bin_displacement=0.1,
        mask_init_factor=3.0,
        target_mask_act='sigmoid',
        score_act='relu',
        frozen_backbone_layers=['conv1', 'bn1', 'layer1', 'layer2'])

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)

    objective = {
        'bb_ce': klreg_losses.KLRegression(),
        'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)
    }

    loss_weight = {
        'bb_ce': 0.01,
        'test_clf': 100,
        'test_init_clf': 100,
        'test_iter_clf': 400
    }

    actor = tracking_actors.KLDiMPActor(net=net,
                                        objective=objective,
                                        loss_weight=loss_weight)

    # Optimizer
    optimizer = optim.Adam(
        [{
            'params': actor.net.classifier.filter_initializer.parameters(),
            'lr': 5e-5
        }, {
            'params': actor.net.classifier.filter_optimizer.parameters(),
            'lr': 5e-4
        }, {
            'params': actor.net.classifier.feature_extractor.parameters(),
            'lr': 5e-5
        }, {
            'params': actor.net.classifier.transformer.parameters(),
            'lr': 1e-3
        }, {
            'params': actor.net.bb_regressor.parameters(),
            'lr': 1e-3
        }, {
            'params': actor.net.feature_extractor.layer3.parameters(),
            'lr': 2e-5
        }],
        lr=2e-4)

    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=15,
                                             gamma=0.2)

    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer,
                         settings, lr_scheduler)

    trainer.train(50, load_latest=True, fail_safe=True)
Example #12
from ltr.dataset import MSCOCOSeq

# Scan the remaining COCO sequences (from the hard-coded index 148924)
# and load their frames, printing each index as a progress indicator.
coco_train = MSCOCOSeq()
train_frame_ids = [1, 2, 3]
for i in range(148924, coco_train.get_num_sequences()):
    print(i)
    coco_train.get_frames(i, train_frame_ids)
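
# The loop above reads like a one-off integrity scan over the tail of the
# COCO sequences. If the aim is to find every sequence whose frames fail
# to load, rather than stopping at the first error, a hedged variant:
from ltr.dataset import MSCOCOSeq

coco_train = MSCOCOSeq()
train_frame_ids = [1, 2, 3]
bad_sequences = []
for i in range(148924, coco_train.get_num_sequences()):
    try:
        coco_train.get_frames(i, train_frame_ids)
    except Exception as exc:  # broad on purpose: this is a data scan
        bad_sequences.append((i, repr(exc)))
print('failed sequences:', bad_sequences)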
Example #13
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.device = 'cuda'
    settings.description = 'TransT with default settings.'
    settings.batch_size = 38
    settings.num_workers = 8
    settings.multi_gpu = True
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 4.0
    settings.template_area_factor = 2.0
    settings.search_feature_sz = 32
    settings.template_feature_sz = 16
    settings.search_sz = settings.search_feature_sz * 8
    settings.temp_sz = settings.template_feature_sz * 8
    settings.center_jitter_factor = {'search': 3, 'template': 0}
    settings.scale_jitter_factor = {'search': 0.25, 'template': 0}

    # Transformer
    settings.position_embedding = 'sine'
    settings.hidden_dim = 256
    settings.dropout = 0.1
    settings.nheads = 8
    settings.dim_feedforward = 2048
    settings.featurefusion_layers = 4

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))

    # Data processing to do on the training pairs
    data_processing_train = processing.TransTProcessing(search_area_factor=settings.search_area_factor,
                                                        template_area_factor=settings.template_area_factor,
                                                        search_sz=settings.search_sz,
                                                        temp_sz=settings.temp_sz,
                                                        center_jitter_factor=settings.center_jitter_factor,
                                                        scale_jitter_factor=settings.scale_jitter_factor,
                                                        mode='sequence',
                                                        transform=transform_train,
                                                        joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.TransTSampler([lasot_train, got10k_train, coco_train, trackingnet_train], [1,1,1,1],
                                samples_per_epoch=1000*settings.batch_size, max_gap=100, processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=0)

    # Create network and actor
    model = transt_models.transt_resnet50(settings)

    # Wrap the network for multi GPU training
    if settings.multi_gpu:
        model = MultiGPU(model, dim=0)

    objective = transt_models.transt_loss(settings)
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    actor = actors.TranstActor(net=model, objective=objective)

    # Optimizer
    param_dicts = [
        {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": 1e-5,
        },
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=1e-4,
                                  weight_decay=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 500)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(1000, load_latest=True, fail_safe=True)
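
# The param_dicts above give the backbone its own learning rate (1e-5)
# while the remaining parameters use the optimizer default (1e-4). A
# quick sanity check over the resulting groups (plain torch.optim API):
for i, group in enumerate(optimizer.param_groups):
    n_params = sum(p.numel() for p in group['params'])
    print('group {}: lr={:g}, params={}'.format(i, group['lr'], n_params))
# Expected: group 0 at lr=0.0001 (heads), group 1 at lr=1e-05 (backbone).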
Example #14
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.base_model = ''  # path to the pretrained base model; must be set before training
    settings.description = 'SiamMask_sharp with ResNet-50 backbone.'
    settings.print_interval = 100  # How often to print loss and other info
    settings.batch_size = 64  # Batch size
    settings.samples_per_epoch = 600000  # Number of training pairs per epoch
    settings.num_workers = 8  # Number of workers for image loading
    settings.search_area_factor = {'train': 1.0, 'test': 143. / 127.}
    settings.output_sz = {'train': 127, 'test': 143}
    settings.scale_type = 'context'
    settings.border_type = 'meanpad'

    # Settings for the image sample and label generation
    settings.center_jitter_factor = {'train': 0.2, 'test': 0.4}
    settings.scale_jitter_factor = {'train': 0.05, 'test': 0.18}
    settings.label_params = {
        'search_size': 143,
        'output_size': 3,
        'anchor_stride': 8,
        'anchor_ratios': [0.33, 0.5, 1, 2, 3],
        'anchor_scales': [8],
        'num_pos': 16,
        'num_neg': 16,
        'num_total': 64,
        'thr_high': 0.6,
        'thr_low': 0.3
    }
    settings.loss_weights = {'cls': 0., 'loc': 0., 'mask': 1}
    settings.neg = 0

    # Train datasets
    vos_train = YoutubeVOS()
    coco_train = MSCOCOSeq()

    # Validation datasets
    vos_val = vos_train  # reuse the training set for validation

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = dltransforms.ToGrayscale(probability=0.25)

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_exemplar = dltransforms.Transpose()
    transform_instance = dltransforms.Compose([
        dltransforms.Color(probability=1.0),
        dltransforms.Blur(probability=0.18),
        dltransforms.Transpose()
    ])
    transform_instance_mask = dltransforms.Transpose()

    # Data processing to do on the training pairs
    data_processing_train = processing.SiamProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        scale_type=settings.scale_type,
        border_type=settings.border_type,
        mode='sequence',
        label_params=settings.label_params,
        train_transform=transform_exemplar,
        test_transform=transform_instance,
        test_mask_transform=transform_instance_mask,
        joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.SiamProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        scale_type=settings.scale_type,
        border_type=settings.border_type,
        mode='sequence',
        label_params=settings.label_params,
        transform=transform_exemplar,
        joint_transform=transform_joint)

    nums_per_epoch = settings.samples_per_epoch // settings.batch_size
    # The sampler for training
    dataset_train = sampler.MaskSampler([coco_train, vos_train], [1, 1],
                                        samples_per_epoch=nums_per_epoch *
                                        settings.batch_size,
                                        max_gap=100,
                                        processing=data_processing_train,
                                        neg=settings.neg)

    # The loader for training
    train_loader = loader.LTRLoader('train',
                                    dataset_train,
                                    training=True,
                                    batch_size=settings.batch_size,
                                    num_workers=settings.num_workers,
                                    stack_dim=0)

    # The sampler for validation
    dataset_val = sampler.MaskSampler([vos_val], [1],
                                      samples_per_epoch=100 *
                                      settings.batch_size,
                                      max_gap=100,
                                      processing=data_processing_val)

    # The loader for validation
    val_loader = loader.LTRLoader('val',
                                  dataset_val,
                                  training=False,
                                  batch_size=settings.batch_size,
                                  num_workers=settings.num_workers,
                                  stack_dim=0)

    # create network, set objective, create optimizer, learning rate scheduler, trainer
    with dygraph.guard():
        # Create network

        def scale_loss(loss):
            total_loss = 0
            for k in settings.loss_weights:
                total_loss += loss[k] * settings.loss_weights[k]
            return total_loss

        net = SiamMask_ResNet50_sharp(scale_loss=scale_loss)

        # Load parameters from the best_base_model
        if settings.base_model == '':
            raise Exception(
                'The base_model path is not set. Check settings.base_model in "ltr/train_settings/siammask/siammask_res50_sharp.py".'
            )
        para_dict, _ = fluid.load_dygraph(settings.base_model)
        model_dict = net.state_dict()

        for key in model_dict.keys():
            if key in para_dict.keys():
                model_dict[key] = para_dict[key]

        net.set_dict(model_dict)

        # Define objective
        objective = {
            'cls': select_softmax_with_cross_entropy_loss,
            'loc': weight_l1_loss,
            'mask': select_mask_logistic_loss
        }

        # Create actor, which wraps network and objective
        actor = actors.SiamActor(net=net, objective=objective)

        # Set to training mode
        actor.train()

        # Define optimizer and learning rate
        decayed_lr = fluid.layers.exponential_decay(learning_rate=0.0005,
                                                    decay_steps=nums_per_epoch,
                                                    decay_rate=0.9,
                                                    staircase=True)
        lr_scheduler = LinearLrWarmup(learning_rate=decayed_lr,
                                      warmup_steps=5 * nums_per_epoch,
                                      start_lr=0.0001,
                                      end_lr=0.0005)

        optimizer = fluid.optimizer.Adam(
            parameter_list=net.mask_head.parameters() +
            net.refine_head.parameters(),
            learning_rate=lr_scheduler)

        trainer = LTRTrainer(actor, [train_loader, val_loader], optimizer,
                             settings, lr_scheduler)
        trainer.train(20, load_latest=False, fail_safe=False)
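
# The partial load above copies only the checkpoint keys that also exist
# in the model, a common idiom when checkpoint and model do not match
# exactly. For comparison, a roughly equivalent sketch in PyTorch (the
# 'base_model.pth' path is illustrative, and `net` is assumed to be a
# torch.nn.Module):
import torch

checkpoint = torch.load('base_model.pth', map_location='cpu')
model_dict = net.state_dict()
# Keep only entries whose key and shape match the current model.
compatible = {k: v for k, v in checkpoint.items()
              if k in model_dict and v.shape == model_dict[k].shape}
model_dict.update(compatible)
net.load_state_dict(model_dict)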
Example #15
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'ATOM using the probabilistic maximum likelihood trained regression model for bounding-box ' \
                           'regression presented in [https://arxiv.org/abs/1909.12297].'
    settings.batch_size = 64
    settings.num_workers = 8
    settings.print_interval = 1
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    settings.search_area_factor = 5.0
    settings.feature_sz = 18
    settings.output_sz = settings.feature_sz * 16
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}

    # Train datasets
    lasot_train = Lasot(settings.env.lasot_dir, split='train')
    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')
    trackingnet_train = TrackingNet(settings.env.trackingnet_dir,
                                    set_ids=list(range(4)))
    coco_train = MSCOCOSeq(settings.env.coco_dir)

    # Validation datasets
    got10k_val = Got10k(settings.env.got10k_dir, split='votval')

    # The joint augmentation transform, that is applied to the pairs jointly
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))

    # The augmentation transform applied to the training set (individually to each image in the pair)
    transform_train = tfm.Transform(
        tfm.ToTensorAndJitter(0.2),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    # The augmentation transform applied to the validation set (individually to each image in the pair)
    transform_val = tfm.Transform(
        tfm.ToTensor(),
        tfm.Normalize(mean=settings.normalize_mean,
                      std=settings.normalize_std))

    # Data processing to do on the training pairs
    proposal_params = {
        'boxes_per_frame': 128,
        'gt_sigma': (0, 0),
        'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)],
        'add_mean_box': True
    }
    data_processing_train = processing.KLBBregProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=proposal_params,
        transform=transform_train,
        joint_transform=transform_joint)

    # Data processing to do on the validation pairs
    data_processing_val = processing.KLBBregProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',
        proposal_params=proposal_params,
        transform=transform_val,
        joint_transform=transform_joint)

    # The sampler for training
    dataset_train = sampler.ATOMSampler(
        [lasot_train, got10k_train, trackingnet_train, coco_train],
        [1, 1, 1, 1],
        samples_per_epoch=1000 * settings.batch_size,
        max_gap=200,
        processing=data_processing_train)

    # The loader for training
    loader_train = LTRLoader('train',
                             dataset_train,
                             training=True,
                             batch_size=settings.batch_size,
                             num_workers=settings.num_workers,
                             shuffle=True,
                             drop_last=True,
                             stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([got10k_val], [1],
                                      samples_per_epoch=500 *
                                      settings.batch_size,
                                      max_gap=200,
                                      processing=data_processing_val)

    # The loader for validation
    loader_val = LTRLoader('val',
                           dataset_val,
                           training=False,
                           batch_size=settings.batch_size,
                           num_workers=settings.num_workers,
                           shuffle=False,
                           drop_last=True,
                           epoch_interval=5,
                           stack_dim=1)

    # Create network and actor
    net = atom_models.atom_resnet18(backbone_pretrained=True)
    objective = klreg_losses.MLRegression()
    actor = bbreg_actors.AtomBBKLActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=15,
                                             gamma=0.2)

    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer,
                         settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(50, load_latest=True, fail_safe=True)
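
# With step_size=15 and gamma=0.2, the learning rate over the 50 training
# epochs follows lr(e) = 1e-3 * 0.2 ** (e // 15). Spelled out:
base_lr, gamma, step = 1e-3, 0.2, 15
for epoch in (0, 15, 30, 45):
    print(epoch, base_lr * gamma ** (epoch // step))
# 0 -> 1e-3, 15 -> 2e-4, 30 -> 4e-5, 45 -> 8e-6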
Example #16
def run(settings):
    # Most common settings are assigned in the settings struct
    settings.description = 'Siam selection for detection with default settings.'
    settings.print_interval = 1                                 # How often to print loss and other info
    settings.batch_size = 1                                    # Batch size
    assert settings.batch_size == 1, "only implemented for batch_size 1"
    settings.num_workers = 4                                    # Number of workers for image loading
    settings.normalize_mean = [0.485, 0.456, 0.406]             # Normalize mean (default pytorch ImageNet values)
    settings.normalize_std = [0.229, 0.224, 0.225]              # Normalize std (default pytorch ImageNet values)
    settings.search_area_factor = 5.0                           # Image patch size relative to target size
    settings.feature_sz = 18                                    # Size of feature map
    settings.output_sz = settings.feature_sz * 16               # Size of input image patches

    # Settings for the image sample and proposal generation
    settings.center_jitter_factor = {'train': 0, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}
    settings.proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}

    # Train datasets
    lasot_train = Lasot(split='train')
    trackingnet_train = TrackingNet(set_ids=list(range(11)))
    coco_train = MSCOCOSeq()

    # Validation datasets
    trackingnet_val = TrackingNet(set_ids=list(range(11,12)))

    # # The joint augmentation transform, that is applied to the pairs jointly
    # transform_joint = dltransforms.ToGrayscale(probability=0.05)
    #
    # # The augmentation transform applied to the training set (individually to each image in the pair)
    # transform_train = torchvision.transforms.Compose([dltransforms.ToTensorAndJitter(0.2),
    #                                                   torchvision.transforms.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)])
    #
    # # The augmentation transform applied to the validation set (individually to each image in the pair)
    # transform_val = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
    #                                                 torchvision.transforms.Normalize(mean=settings.normalize_mean, std=settings.normalize_std)])

    # Data processing to do on the training pairs
    # data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
    #                                                   output_sz=settings.output_sz,
    #                                                   center_jitter_factor=settings.center_jitter_factor,
    #                                                   scale_jitter_factor=settings.scale_jitter_factor,
    #                                                   mode='sequence',
    #                                                   proposal_params=settings.proposal_params,
    #                                                   transform=transform_train,
    #                                                   joint_transform=transform_joint)
    #
    # # Data processing to do on the validation pairs
    # data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,
    #                                                 output_sz=settings.output_sz,
    #                                                 center_jitter_factor=settings.center_jitter_factor,
    #                                                 scale_jitter_factor=settings.scale_jitter_factor,
    #                                                 mode='sequence',
    #                                                 proposal_params=settings.proposal_params,
    #                                                 transform=transform_val,
    #                                                 joint_transform=transform_joint)

    img_transform = ImageTransform(size_divisor=32,
                                   mean=[123.675, 116.28, 103.53],
                                   std=[58.395, 57.12, 57.375],
                                   to_rgb=True)
    data_processing = processing.SiamSelProcessing(transform=img_transform)

    # The sampler for training
    dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1,1,1],
                                        samples_per_epoch=1000*settings.batch_size, max_gap=50,
                                        processing=data_processing)

    # The loader for training
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,
                             shuffle=True, drop_last=True, stack_dim=1)

    # The sampler for validation
    dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,
                                      processing=data_processing)

    # The loader for validation
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,
                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)

    # Create network
    # net = atom_models.atom_resnet18(backbone_pretrained=True)
    net = SiamSelNet()

    # Set objective
    objective = nn.BCEWithLogitsLoss()

    # Create actor, which wraps network and objective
    actor = actors.SiamSelActor(net=net, objective=objective)

    # Optimizer
    optimizer = optim.Adam(actor.net.selector.parameters(), lr=1e-4)

    # Learning rate scheduler
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.2)
    # lr_scheduler = WarmupMultiStepLR(optimizer,[50*1000,80*1000])


    # Create trainer
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)

    # Run training (set fail_safe=False if you are debugging)
    trainer.train(100, load_latest=True, fail_safe=True)


# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
#     def __init__(
#         self,
#         optimizer,
#         milestones,
#         gamma=0.1,
#         warmup_factor=1.0 / 3,
#         warmup_iters=500,
#         warmup_method="linear",
#         last_epoch=-1,
#     ):
#         if not list(milestones) == sorted(milestones):
#             raise ValueError(
#                 "Milestones should be a list of" " increasing integers. Got {}",
#                 milestones,
#             )
#
#         if warmup_method not in ("constant", "linear"):
#             raise ValueError(
#                 "Only 'constant' or 'linear' warmup_method accepted"
#                 "got {}".format(warmup_method)
#             )
#         self.milestones = milestones
#         self.gamma = gamma
#         self.warmup_factor = warmup_factor
#         self.warmup_iters = warmup_iters
#         self.warmup_method = warmup_method
#         super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
#
#     def get_lr(self):
#         warmup_factor = 1
#         if self.last_epoch < self.warmup_iters:
#             if self.warmup_method == "constant":
#                 warmup_factor = self.warmup_factor
#             elif self.warmup_method == "linear":
#                 alpha = float(self.last_epoch) / self.warmup_iters
#                 warmup_factor = self.warmup_factor * (1 - alpha) + alpha
#         return [
#             base_lr
#             * warmup_factor
#             * self.gamma ** bisect_right(self.milestones, self.last_epoch)
#             for base_lr in self.base_lrs
#         ]
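
# The commented-out scheduler above is nearly runnable: it is missing the
# bisect_right import, its first ValueError passes the format string and
# milestones as separate arguments instead of formatting, and the second
# concatenates "accepted" and "got" without a separator. A corrected
# sketch, kept faithful to the commented code:
from bisect import bisect_right

import torch

class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    def __init__(self, optimizer, milestones, gamma=0.1,
                 warmup_factor=1.0 / 3, warmup_iters=500,
                 warmup_method="linear", last_epoch=-1):
        if list(milestones) != sorted(milestones):
            raise ValueError("Milestones should be a list of increasing "
                             "integers. Got {}".format(milestones))
        if warmup_method not in ("constant", "linear"):
            raise ValueError("Only 'constant' or 'linear' warmup_method "
                             "accepted, got {}".format(warmup_method))
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Warm up (constant or linear) for the first warmup_iters steps,
        # then decay by gamma at each milestone, counted in scheduler steps.
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        return [base_lr * warmup_factor
                * self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs]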