# Shared imports for the parameter presets below. Module paths follow the
# standard pytracking layout; trackernet and ROOT_DIR are fork-specific
# (paths assumed, not verified against this fork).
import math
import os.path as osp

import numpy as np
import torch

from pytracking.utils import TrackerParams, FeatureParams
from pytracking.features.extractor import MultiResolutionExtractor
from pytracking.features import color, deep
from pytracking.features import trackernet          # fork-specific module (assumed path)
from pytracking.evaluation.environment import env_settings, EnvSettings
# ROOT_DIR is defined by the enclosing fork (used by the SiamMask presets).


def parameters():
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                # Debug level
    params.visualization = False    # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.exemplar_size = 127
    params.instance_size = 287
    params.base_size = 0
    params.context_amount = 0.5

    # Anchor parameters
    params.anchor_stride = 8
    params.anchor_ratios = [0.33, 0.5, 1, 2, 3]
    params.anchor_scales = [8]

    # Tracking parameters
    params.penalty_k = 0.18
    params.window_influence = 0.41
    params.lr = 0.05

    # Setup the feature extractor
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.SRPNAlexNet(fparams=deep_fparams)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
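# --- Hypothetical sketch (not part of the preset above) ---------------------
# How SiamRPN-style trackers typically expand anchor_stride / anchor_ratios /
# anchor_scales into per-location anchor shapes, and where penalty_k,
# window_influence and lr enter the score post-processing. Names below are
# illustrative, not the tracker's own API.
def _anchor_shapes_sketch(stride=8, ratios=(0.33, 0.5, 1, 2, 3), scales=(8,)):
    base_area = stride * stride          # base anchor area in pixels
    shapes = []
    for r in ratios:
        w = int(np.sqrt(base_area / r))  # width shrinks as the ratio h/w grows
        h = int(w * r)
        for s in scales:
            shapes.append((w * s, h * s))
    return shapes                        # 5 ratios x 1 scale = 5 anchors per location

# At detection time the per-anchor score is typically penalized for scale and
# ratio change (pscore = score * exp(-(change - 1) * penalty_k)), blended with
# a Hann window (pscore * (1 - window_influence) + window * window_influence),
# and the winning anchor updates the target size with smoothing factor lr.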
def parameters():
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                # Debug level
    params.visualization = False    # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.exemplar_size = 127
    params.max_image_sample_size = 255 * 255   # Maximum image sample size
    params.min_image_sample_size = 255 * 255   # Minimum image sample size

    # Detection parameters
    params.scale_factors = 1.0375 ** np.array([-1, 0, 1])   # What scales to use for localization
    params.score_upsample_factor = 16                       # How much Fourier upsampling to use

    # Setup the feature extractor
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.SFCAlexnet(
        net_path='/ssd2/bily/code/baidu/personal-code/pytracking/ltr/checkpoints/ltr/fs/siamrpn50/SiamRPN_ep0001.pth.tar',
        output_layers=['conv5'],
        fparams=deep_fparams)
    params.features = MultiResolutionExtractor([deep_feat])

    # SiamFC-style tracking parameters
    params.net_path = None
    params.response_up = 16
    params.response_sz = 17
    params.context = 0.5
    params.instance_sz = 255
    params.exemplar_sz = 127
    params.scale_num = 3
    params.scale_step = 1.0375
    params.scale_lr = 0.59
    params.scale_penalty = 0.9745
    params.window_influence = 0.176
    params.total_stride = 8

    return params
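# --- Hypothetical sketch ----------------------------------------------------
# How SiamFC-style presets like the one above turn scale_num / scale_step into
# a search pyramid, and where scale_penalty / window_influence act. Purely
# illustrative; variable names are not the tracker's own.
def _siamfc_scales_sketch(scale_num=3, scale_step=1.0375):
    # e.g. [1/1.0375, 1.0, 1.0375]: each factor rescales the search crop,
    # which is then resized to instance_sz (255) before scoring.
    return scale_step ** (np.arange(scale_num) - scale_num // 2)

# Non-unit scales are damped by scale_penalty before the best scale is picked;
# the chosen response map is upsampled by response_up (17 -> 272 cells),
# blended with a Hann window weighted by window_influence, and the scale
# estimate is smoothed with scale_lr.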
def parameters():
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                # Debug level
    params.visualization = False    # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (18 * 16) ** 2   # Maximum image sample size
    params.min_image_sample_size = (18 * 16) ** 2   # Minimum image sample size
    params.search_area_scale = 5                    # Scale relative to target size
    params.feature_size_odd = False                 # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5              # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60        # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6         # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0    # CG iterations to run after GN
    params.fletcher_reeves = False  # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True    # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None  # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.01                 # Learning rate
    deep_params.init_samples_minimum_weight = 0.25   # Minimum weight of initial samples in memory
    deep_params.output_sigma_factor = 1 / 4          # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250   # Memory size
    params.train_skipping = 10        # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4, 4)     # Kernel size of filter
    deep_params.compressed_dim = 64      # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1        # Filter regularization factor
    deep_params.projection_reg = 1e-4    # Projection regularization factor

    # Windowing
    params.feature_window = False   # Perform windowing of features
    params.window_output = False    # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)   # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1       # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {
        'fliplr': True,
        'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],
        'dropout': (7, 0.2)
    }
    params.augmentation_expansion_factor = 2   # How much to expand sample when doing augmentation
    params.random_shift_factor = 1 / 3         # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True        # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True    # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True   # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'        # Method for initializing the projection matrix
    params.filter_init_method = 'randn'      # Method for initializing the spatial filter
    params.projection_activation = 'none'    # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True        # Use this or not
    params.target_not_found_threshold = 0.25   # Absolute score threshold to detect target missing
    params.distractor_threshold = 0.8          # Relative threshold to find distractors
    params.hard_negative_threshold = 0.5       # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2     # Target neighborhood to remove
    params.dispalcement_scale = 0.8            # Displacement to consider for distractors (attribute name spelling matches the tracker's)
    params.hard_negative_learning_rate = 0.02  # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5           # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True  # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = True            # Use IoU net or not
    params.box_refinement_space = 'relative'
    params.iounet_augmentation = False   # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                  # Top-k average to estimate final box
    params.num_init_random_boxes = 9     # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1          # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5           # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6      # Limit on the aspect ratio
    params.box_refinement_iter = 10      # Number of iterations for refining the boxes
    params.box_refinement_step_length = (1e-2, 5e-2)   # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1               # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path='atom_gmm_sampl', output_layers=['layer3'],
                                  fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
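# --- Hypothetical sketch ----------------------------------------------------
# How the IoUNet refinement stage above typically seeds its candidate boxes
# from num_init_random_boxes / box_jitter_pos / box_jitter_sz: the classifier
# box plus randomly jittered copies, later filtered by maximal_aspect_ratio
# and refined by gradient ascent on the predicted IoU. Illustrative only.
def _init_proposals_sketch(box, num_random=9, jitter_pos=0.1, jitter_sz=0.5):
    """box: tensor (x, y, w, h). Returns (1 + num_random, 4) candidates."""
    ref_scale = box[2:].prod().sqrt()    # reference target scale in pixels
    candidates = [box]
    for _ in range(num_random):
        offset = jitter_pos * ref_scale * torch.randn(2)   # jitter translation
        scale = torch.exp(jitter_sz * torch.randn(2))      # jitter w and h
        candidates.append(torch.cat([box[:2] + offset, box[2:] * scale]))
    return torch.stack(candidates)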
def parameters(gpu_device=0):
    params = TrackerParams()

    params.debug = 0
    params.visualization = True
    params.visdom_info = {'use_visdom': False}
    params.use_gpu = True

    # Feature specific parameters
    shallow_params = TrackerParams()
    deep_params = TrackerParams()

    # Conjugate Gradient parameters
    params.CG_iter = 5               # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 100        # The total number of Conjugate Gradient iterations used in the first frame
    params.post_init_CG_iter = 0     # CG iterations to run after GN
    params.fletcher_reeves = True    # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True     # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = 75   # Forgetting rate of the last conjugate direction
    params.precond_data_param = 0.7  # Weight of the data term in the preconditioner
    params.precond_reg_param = 0.02  # Weight of the regularization term in the preconditioner

    # Learning parameters
    shallow_params.learning_rate = 0.025
    deep_params.learning_rate = 0.0075
    shallow_params.output_sigma_factor = 1 / 12
    deep_params.output_sigma_factor = 1 / 4

    # Training parameters
    params.sample_memory_size = 200   # Memory size

    # Detection parameters
    params.score_fusion_strategy = 'weightedsum'   # Fusion strategy
    shallow_params.translation_weight = 0.4        # Weight of this feature
    deep_params.translation_weight = 1 - shallow_params.translation_weight

    # Init augmentation parameters
    params.augmentation = {
        'fliplr': False,
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'dropout': (7, 0.2)
    }

    # Whether to use augmentation for this feature
    deep_params.use_augmentation = True
    shallow_params.use_augmentation = True

    # Interpolation parameters
    params.interpolation_method = 'bicubic'  # The kind of interpolation kernel
    params.interpolation_bicubic_a = -0.75   # The parameter for the bicubic interpolation kernel
    params.interpolation_centering = False   # Center the kernel at the feature sample
    params.interpolation_windowing = False   # Do additional windowing on the Fourier coefficients of the kernel

    # Regularization parameters
    shallow_params.use_reg_window = True         # Use spatial regularization or not
    shallow_params.reg_window_min = 1e-4         # The minimum value of the regularization window
    shallow_params.reg_window_edge = 10e-3       # The impact of the spatial regularization
    shallow_params.reg_window_power = 2          # The degree of the polynomial to use (e.g. 2 is a quadratic window)
    shallow_params.reg_sparsity_threshold = 0.1  # Relative threshold for which DFT coefficients are set to zero

    deep_params.use_reg_window = True
    deep_params.reg_window_min = 10e-4
    deep_params.reg_window_edge = 50e-3
    deep_params.reg_window_power = 2
    deep_params.reg_sparsity_threshold = 0.1

    fparams = FeatureParams(feature_params=[shallow_params, deep_params])
    features = deep.ResNet18m1(output_layers=['vggconv1', 'layer3'],
                               use_gpu=params.use_gpu, gpu_device=gpu_device,
                               fparams=fparams, pool_stride=[2, 1], normalize_power=2)
    params.features = MultiResolutionExtractor([features])

    return params
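# --- Hypothetical sketch ----------------------------------------------------
# The reg_window_* values above define a polynomial spatial regularization
# window: roughly reg_window_min over the target, growing with the given
# power away from the center so that filter energy outside the target is
# penalized. The construction below (per-dimension polynomials summed) is a
# simplified guess at the shape; sizes and radii are illustrative.
def _reg_window_sketch(sz=23, target_radius=5.0, w_min=1e-4, w_edge=10e-3, power=2):
    coord = torch.arange(sz).float() - (sz - 1) / 2
    dx = (coord.abs() / target_radius) ** power
    return w_min + w_edge * (dx.view(-1, 1) + dx.view(1, -1))   # (sz, sz) window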
def parameters(pth_path=None):
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 1              # Debug level
    params.visualization = True   # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (16 * 16) ** 2   # Maximum image sample size ((18 * 16) ** 2 in the base config)
    params.min_image_sample_size = (16 * 16) ** 2   # Minimum image sample size
    params.search_area_scale = 4.5                  # Scale relative to target size
    params.feature_size_odd = False                 # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5              # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60        # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6         # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0    # CG iterations to run after GN
    params.fletcher_reeves = False  # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True    # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None  # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.0075       # Learning rate
    deep_params.output_sigma_factor = 1 / 4  # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250   # Memory size
    params.train_skipping = 10        # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4, 4)     # Kernel size of filter
    deep_params.compressed_dim = 64      # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1        # Filter regularization factor
    deep_params.projection_reg = 1e-4    # Projection regularization factor

    # Windowing
    params.feature_window = False   # Perform windowing of features
    params.window_output = True     # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)   # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1       # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {
        'fliplr': True,
        'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'relativeshift': [(0.25, 0.25), (-0.25, 0.25), (0.25, -0.25), (-0.25, -0.25),
                          (0.75, 0.75), (-0.75, 0.75), (0.75, -0.75), (-0.75, -0.75)]
    }
    params.augmentation_expansion_factor = 2   # How much to expand sample when doing augmentation
    params.random_shift_factor = 0             # How much random shift to do on each augmented sample (1/3 in the base config)
    deep_params.use_augmentation = True        # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True    # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True   # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'pca'          # Method for initializing the projection matrix: randn | pca
    params.filter_init_method = 'zeros'      # Method for initializing the spatial filter: randn | zeros
    params.projection_activation = 'none'    # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True        # Use this or not
    params.target_not_found_threshold = -1     # Absolute score threshold to detect target missing
    params.distractor_threshold = 100          # Relative threshold to find distractors
    params.hard_negative_threshold = 0.3       # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2     # Target neighborhood to remove
    params.dispalcement_scale = 0.7            # Displacement to consider for distractors
    params.hard_negative_learning_rate = 0.02  # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5           # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True  # Update scale or not if distractor is close

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])

    # Use ResNet50 for the filter
    params.use_resnet50 = True
    if params.use_resnet50:
        deep_feat_filter = deep.ATOMResNet50(output_layers=['layer3'],
                                             fparams=deep_fparams, normalize_power=2)
        # deep_feat2 = deep.DRNetSE50(net_path='SE_Res50.pth', output_layers=['layer3'],
        #                             fparams=deep_fparams, normalize_power=2)
        params.features_filter = MultiResolutionExtractor([deep_feat_filter])
        # params.features_filter = MultiResolutionExtractor([deep_feat2])

    params.vot_anno_conversion_type = 'preserve_area'
    params.use_segmentation = True

    env = env_settings()
    net_path = env.network_path
    if pth_path is None:
        pth_path = '/home/jaffe/PycharmProjects//DMB/pytracking/networks/recurrent25.pth.tar'
    params.pth_path = pth_path

    # Segmentation parameters
    params.segm_use_dist = True
    params.segm_normalize_mean = [0.485, 0.456, 0.406]
    params.segm_normalize_std = [0.229, 0.224, 0.225]
    params.segm_search_area_factor = 4.0
    params.segm_feature_sz = 24
    params.segm_output_sz = params.segm_feature_sz * 16
    params.segm_scale_estimation = True
    params.segm_optimize_polygon = True

    params.tracking_uncertainty_thr = 3
    params.response_budget_sz = 25
    params.uncertainty_segm_scale_thr = 3.5
    params.uncertainty_segment_thr = 10
    params.segm_pixels_ratio = 2
    params.mask_pixels_budget_sz = 25
    params.segm_min_scale = 0.2
    params.max_rel_scale_ch_thr = 0.75
    params.consider_segm_pixels_ratio = 1
    params.opt_poly_overlap_thr = 0.3
    params.poly_cost_a = 1.2
    params.poly_cost_b = 1
    params.segm_dist_map_type = 'center'   # center | bbox
    params.min_scale_change_factor = 0.95
    params.max_scale_change_factor = 1.05
    params.init_segm_mask_thr = 0.5
    params.segm_mask_thr = 0.5

    params.masks_save_path = ''
    # params.masks_save_path = 'save-masks-path'
    params.save_mask = params.masks_save_path != ''

    return params
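# Example: the preset above can be built directly, with pth_path overriding
# the hard-coded segmentation checkpoint (the path below is illustrative).
def _build_segm_params_example():
    params = parameters(pth_path='/path/to/recurrent25.pth.tar')
    assert params.segm_output_sz == params.segm_feature_sz * 16   # 24 * 16 = 384
    return params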
def parameters():
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    deep_params = TrackerParams()

    params.image_sample_size = 14 * 16
    params.search_area_scale = 4
    params.feature_size_odd = False

    # Learning parameters
    params.sample_memory_size = 250
    deep_params.learning_rate = 0.0075
    deep_params.init_samples_minimum_weight = 0.0
    params.train_skipping = 10
    deep_params.output_sigma_factor = 1 / 4

    # Net optimization params
    params.update_classifier = True
    params.net_opt_iter = 25
    params.net_opt_update_iter = 3
    params.net_opt_hn_iter = 3

    params.scale_factors = torch.ones(1)

    # Spatial filter parameters
    deep_params.kernel_size = (4, 4)
    params.window_output = True

    # Detection parameters
    # params.score_upsample_factor = 1
    # params.score_fusion_strategy = 'weightedsum'
    # deep_params.translation_weight = 1

    # Init augmentation parameters
    # params.augmentation = {}
    params.augmentation = {
        'fliplr': True,
        'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],
        'dropout': (7, 0.2)
    }
    params.augmentation_expansion_factor = 2
    params.random_shift_factor = 1 / 3
    deep_params.use_augmentation = True

    # Advanced localization parameters
    params.advanced_localization = True
    params.target_not_found_threshold = 0.0
    params.distractor_threshold = 100
    params.hard_negative_threshold = 0.3
    params.target_neighborhood_scale = 2.2
    params.dispalcement_scale = 0.7
    params.perform_hn_without_windowing = True
    params.hard_negative_learning_rate = 0.02
    params.update_scale_when_uncertain = True

    # IoUNet parameters
    params.iounet_augmentation = False
    params.iounet_use_log_scale = True
    params.iounet_k = 3
    params.num_init_random_boxes = 9
    params.box_jitter_pos = 0.1
    params.box_jitter_sz = 0.5
    params.maximal_aspect_ratio = 6
    params.box_refinement_iter = 5
    params.box_refinement_step_length = 1
    params.box_refinement_step_decay = 1

    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = trackernet.SimpleTrackerResNet18(
        net_path='sdlearn_300_onlytestloss_lr_causal_mg30_iou_coco',
        fparams=deep_fparams)
    params.features = MultiResolutionExtractor([deep_feat])

    params.vot_anno_conversion_type = 'preserve_area'

    return params
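# --- Hypothetical sketch ----------------------------------------------------
# What image_sample_size = 14*16 = 224 and search_area_scale = 4 mean above:
# the tracker crops a square of side sqrt(w*h) * search_area_scale around the
# previous target center and resizes it to 224 x 224 for the backbone.
def _search_patch_side_sketch(target_w, target_h, search_area_scale=4):
    return math.sqrt(target_w * target_h) * search_area_scale

# e.g. an 80 x 50 target is sampled from a ~253 px square, resized to 224 px.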
def parameters():
    params = TrackerParams()

    params.debug = 0
    params.visualization = False
    params.use_gpu = True

    # Feature specific parameters
    shallow_params = TrackerParams()
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = 250 ** 2   # Maximum image sample size
    params.min_image_sample_size = 200 ** 2   # Minimum image sample size
    params.search_area_scale = 4.5            # Scale relative to target size

    # Conjugate Gradient parameters
    params.CG_iter = 5               # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 100        # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 10         # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0     # CG iterations to run after GN
    params.fletcher_reeves = False   # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True     # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = 75   # Forgetting rate of the last conjugate direction
    params.precond_data_param = 0.3  # Weight of the data term in the preconditioner
    params.precond_reg_param = 0.15  # Weight of the regularization term in the preconditioner
    params.precond_proj_param = 35   # Weight of the projection matrix part in the preconditioner

    # Learning parameters
    shallow_params.learning_rate = 0.025
    deep_params.learning_rate = 0.0075
    shallow_params.output_sigma_factor = 1 / 16
    deep_params.output_sigma_factor = 1 / 4

    # Training parameters
    params.sample_memory_size = 200   # Memory size
    params.train_skipping = 10        # How often to run training (every n-th frame)

    # Detection parameters
    params.scale_factors = 1.02 ** torch.arange(-2, 3).float()   # What scales to use for localization
    params.score_upsample_factor = 1               # How much Fourier upsampling to use
    params.score_fusion_strategy = 'weightedsum'   # Fusion strategy
    shallow_params.translation_weight = 0.4        # Weight of this feature
    deep_params.translation_weight = 1 - shallow_params.translation_weight

    # Init augmentation parameters
    params.augmentation = {
        'fliplr': True,
        'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'shift': [(6, 6), (-6, 6), (6, -6), (-6, -6)],
        'dropout': (7, 0.2)
    }

    # Whether to use augmentation for this feature
    deep_params.use_augmentation = True
    shallow_params.use_augmentation = True

    # Factorized convolution parameters
    # params.use_projection_matrix = True   # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True  # Whether the projection matrix should be optimized or not
    # params.proj_init_method = 'pca'       # Method for initializing the projection matrix
    params.projection_reg = 5e-8            # Regularization parameter of the projection matrix
    shallow_params.compressed_dim = 16      # Dimension output of projection matrix for shallow features
    deep_params.compressed_dim = 64         # Dimension output of projection matrix for deep features

    # Interpolation parameters
    params.interpolation_method = 'bicubic'  # The kind of interpolation kernel
    params.interpolation_bicubic_a = -0.75   # The parameter for the bicubic interpolation kernel
    params.interpolation_centering = True    # Center the kernel at the feature sample
    params.interpolation_windowing = False   # Do additional windowing on the Fourier coefficients of the kernel

    # Regularization parameters
    shallow_params.use_reg_window = True          # Use spatial regularization or not
    shallow_params.reg_window_min = 1e-4          # The minimum value of the regularization window
    shallow_params.reg_window_edge = 10e-3        # The impact of the spatial regularization
    shallow_params.reg_window_power = 2           # The degree of the polynomial to use (e.g. 2 is a quadratic window)
    shallow_params.reg_sparsity_threshold = 0.05  # Relative threshold for which DFT coefficients are set to zero

    deep_params.use_reg_window = True
    deep_params.reg_window_min = 10e-4
    deep_params.reg_window_edge = 50e-3
    deep_params.reg_window_power = 2
    deep_params.reg_sparsity_threshold = 0.1

    fparams = FeatureParams(feature_params=[shallow_params, deep_params])
    features = deep.ResNet18m1(output_layers=['vggconv1', 'layer3'],
                               use_gpu=params.use_gpu, fparams=fparams,
                               pool_stride=[2, 1], normalize_power=2)
    params.features = MultiResolutionExtractor([features])

    # Metric model parameters
    params.metric_model_path = '/home/zj/tracking/metricNet/0py_mdnet_metric_similar/pyMDNet/models/metric_model_zj_57470.pt'
    params.lof_rate = 1.8
    params.sim_rate = 0.3
    params.lt = False

    return params
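# --- Hypothetical sketch ----------------------------------------------------
# The factorized convolution controlled by update_projection_matrix /
# compressed_dim amounts to a learned channel projection applied to the
# features before correlation, e.g. 256 backbone channels down to
# compressed_dim = 64. Shapes below are illustrative.
def _project_features_sketch(feat, compressed_dim=64):
    # feat: (B, C, H, W) backbone features; P: (C, compressed_dim) learned matrix
    P = torch.randn(feat.shape[1], compressed_dim)
    return torch.einsum('bchw,cd->bdhw', feat, P)   # (B, compressed_dim, H, W)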
def parameters():
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 0                # Debug level
    params.visualization = False    # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = 320 ** 2   # Maximum image sample size
    params.min_image_sample_size = 320 ** 2   # Minimum image sample size
    params.search_area_scale = 5.5            # Scale relative to target size
    params.feature_size_odd = False           # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5              # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 100       # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 10        # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0    # CG iterations to run after GN
    params.fletcher_reeves = False  # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True    # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None  # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.0075                # Learning rate
    deep_params.output_sigma_factor = [1 / 3, 1 / 4]  # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250   # Memory size
    params.train_skipping = 10        # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = [(4, 4), (4, 4)]   # Kernel size of filter
    deep_params.compressed_dim = [32, 32]        # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1                # Filter regularization factor
    deep_params.projection_reg = 1e-4            # Projection regularization factor

    # Windowing
    params.feature_window = False   # Perform windowing of features
    params.window_output = True     # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)   # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1       # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {
        'fliplr': True,
        'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],
        'dropout': (7, 0.2)
    }
    params.augmentation_expansion_factor = 2   # How much to expand sample when doing augmentation
    params.random_shift_factor = 1 / 3         # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True        # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True    # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True   # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'        # Method for initializing the projection matrix
    params.filter_init_method = 'randn'      # Method for initializing the spatial filter
    params.projection_activation = 'none'    # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True          # Use this or not
    params.target_not_found_threshold = -1       # Absolute score threshold to detect target missing
    params.distractor_threshold = 100            # Relative threshold to find distractors
    params.hard_negative_threshold = 0.3         # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2       # Target neighborhood to remove
    params.dispalcement_scale = 0.9              # Displacement to consider for distractors
    params.hard_negative_learning_rate = 0.0075  # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5             # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True    # Update scale or not if distractor is close

    params.alpha = 0
    params.beta = -1

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.DRNetMobileNetSmall(net_path='drnet_cfkd.pth.tar',
                                         output_layers=['layer3'],
                                         fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    params.vot_anno_conversion_type = 'preserve_area'

    return params
def parameters():
    params = TrackerParams()

    # ++++++++++++++++++++++++++++ Parallel SiamMask ++++++++++++++++++++++++++++
    params.use_parallel_smask = True
    params.use_area_preserve = True
    params.parallel_smask_iou_threshold = 0.7
    params.parallel_smask_area_preserve_threshold = 2
    params.parallel_smask_config = osp.join(ROOT_DIR, 'pytracking/tracker/siamesemask/experiments/siammask/config_vot.json')
    params.parallel_smask_ckpt = osp.join(ROOT_DIR, 'pytracking/networks/SiamMask_VOT_LD.pth')
    params.use_smask_replace_atom = True

    # ++++++++++++++++++++++++++++ Sequential SiamMask ++++++++++++++++++++++++++
    params.use_sequential_smask = True
    params.sequential_smask_ratio = 0.25
    params.sequential_smask_config = osp.join(ROOT_DIR, 'pytracking/tracker/siamesemask_127/experiments/siammask/config_vot.json')
    params.sequential_smask_ckpt = osp.join(ROOT_DIR, 'pytracking/networks/SiamMask_VOT_LD.pth')

    # ++++++++++++++++++++++++++++ Refine +++++++++++++++++++++++++++++++++++++++
    params.is_refine = False   # use optimization algorithm to optimize mask
    params.is_fast_refine = False
    params.is_faster_refine = True
    params.angle_state = False
    params.soft_angle_state = False

    # ++++++++++++++++++++++++++++ ATOM PARAMS ++++++++++++++++++++++++++++++++++
    # Patch sampling parameters using area ratio
    params.use_adaptive_maximal_aspect_ratio = True
    params.use_area_ratio_adaptive_search_region = True
    params.area_ratio_adaptive_ratio = 0.005
    params.use_area_ratio_prevent_zoom_in = True
    params.area_ratio_zoom_in_ratio = 0.75
    params.feature_size_odd = False

    # Patch sampling parameters using current and mean max response speed
    params.use_speed_adaptive_search_region = True
    params.current_speed_threshold = 0.25
    params.mean_speed_threshold = 0.20
    params.center_distance_threshold = 0.3

    # These are usually set from outside
    params.debug = 0                # Debug level
    params.visualization = False    # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Optimization parameters
    params.CG_iter = 8              # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60        # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6         # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0    # CG iterations to run after GN
    params.fletcher_reeves = False  # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True    # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None  # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.0075       # Learning rate
    deep_params.output_sigma_factor = 1 / 4  # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250   # Memory size
    params.train_skipping = 5         # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4, 4)     # Kernel size of filter
    deep_params.compressed_dim = 768     # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1        # Filter regularization factor
    deep_params.projection_reg = 1e-4    # Projection regularization factor

    # Windowing
    params.feature_window = False   # Perform windowing of features
    params.window_output = True     # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.Tensor([1.04 ** x for x in [-2, -1, 0, 1, 2]])   # Multi-scale test
    params.score_upsample_factor = 1   # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {
        'fliplr': True,
        'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],
        'dropout': (7, 0.2)
    }
    params.augmentation_expansion_factor = 2   # How much to expand sample when doing augmentation
    params.random_shift_factor = 1 / 3         # How much random shift to do on each augmented sample
    deep_params.use_augmentation = True        # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True    # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True   # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'randn'        # Method for initializing the projection matrix
    params.filter_init_method = 'randn'      # Method for initializing the spatial filter
    params.projection_activation = 'none'    # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True        # Use this or not
    params.target_not_found_threshold = -1     # Absolute score threshold to detect target missing
    params.distractor_threshold = 100          # Relative threshold to find distractors
    params.hard_negative_threshold = 0.3       # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2     # Target neighborhood to remove
    params.dispalcement_scale = 0.7            # Displacement to consider for distractors
    params.hard_negative_learning_rate = 0.02  # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5           # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True  # Update scale or not if distractor is close

    # IoUNet parameters
    params.iounet_augmentation = False   # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                  # Top-k average to estimate final box
    params.num_init_random_boxes = 9     # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1          # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5           # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6      # Limit on the aspect ratio
    params.box_refinement_iter = 10      # Number of iterations for refining the boxes
    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet50(net_path='atom_vid_lasot_coco_resnet50_fpn_ATOMnet_ep0040.pth.tar',
                                  output_layers=['layer3'],
                                  fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    params.vot_anno_conversion_type = 'preserve_area'

    return params
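# --- Hypothetical sketch ----------------------------------------------------
# The five scale_factors above (1.04 ** [-2..2]) implement the multi-scale
# test: each factor rescales the search region, and the scale whose response
# peak is highest wins. Illustrative, not the tracker's exact localization code.
def _pick_scale_sketch(responses, scale_factors):
    # responses: (num_scales, H, W) score maps, one per tested scale
    best = responses.view(responses.shape[0], -1).max(dim=1).values.argmax()
    return scale_factors[best]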
def parameters():
    params = TrackerParams()

    params.debug = 0
    params.visualization = True
    params.visdom_info = {'use_visdom': False}
    params.use_gpu = True

    # Feature specific parameters
    shallow_params = TrackerParams()

    # Conjugate Gradient parameters
    params.CG_iter = 5               # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 50         # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 10         # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0     # CG iterations to run after GN
    params.fletcher_reeves = False   # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True     # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = 75   # Forgetting rate of the last conjugate direction
    params.precond_data_param = 0.3  # Weight of the data term in the preconditioner
    params.precond_reg_param = 0.15  # Weight of the regularization term in the preconditioner
    params.precond_proj_param = 35   # Weight of the projection matrix part in the preconditioner

    # Learning parameters
    shallow_params.learning_rate = 0.025
    shallow_params.output_sigma_factor = 1 / 4   # alternatives tried: 1/8, 1/16, 1.0

    # Training parameters
    params.sample_memory_size = 200   # Memory size
    params.train_skipping = 201       # How often to run training (every n-th frame)

    # Detection parameters
    params.score_upsample_factor = 1               # How much Fourier upsampling to use
    params.score_fusion_strategy = 'weightedsum'   # Fusion strategy
    shallow_params.translation_weight = 1.0        # Weight of this feature

    # Init augmentation parameters
    params.augmentation = {
        'fliplr': False,
        # 'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
        'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
        # 'shift': [(6, 6), (-6, 6), (6, -6), (-6, -6)],
        'dropout': (7, 0.2)
    }

    # Whether to use augmentation for this feature
    # deep_params.use_augmentation = True
    shallow_params.use_augmentation = True

    # Interpolation parameters
    params.interpolation_method = 'bicubic'  # The kind of interpolation kernel
    params.interpolation_bicubic_a = -0.75   # The parameter for the bicubic interpolation kernel
    params.interpolation_centering = True    # Center the kernel at the feature sample
    params.interpolation_windowing = False   # Do additional windowing on the Fourier coefficients of the kernel

    # Regularization parameters
    shallow_params.use_reg_window = True          # Use spatial regularization or not
    shallow_params.reg_window_min = 1e-4          # The minimum value of the regularization window
    shallow_params.reg_window_edge = 10e-3        # The impact of the spatial regularization
    shallow_params.reg_window_power = 2           # The degree of the polynomial to use (e.g. 2 is a quadratic window)
    shallow_params.reg_sparsity_threshold = 0.05  # Relative threshold for which DFT coefficients are set to zero

    fparams = FeatureParams(feature_params=[shallow_params])
    features = color.RGB(fparams=fparams, pool_stride=None, output_size=None,
                         normalize_power=None, use_for_color=True, use_for_gray=False)
    params.features = MultiResolutionExtractor([features])

    return params
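# --- Hypothetical sketch ----------------------------------------------------
# The interpolation_method / interpolation_bicubic_a pair above selects the
# standard Keys bicubic kernel with a = -0.75, used to treat discrete feature
# maps as continuous functions:
def _bicubic_kernel_sketch(x, a=-0.75):
    x = np.abs(x)
    return np.where(x <= 1, (a + 2) * x**3 - (a + 3) * x**2 + 1,
                    np.where(x < 2, a * (x**3 - 5 * x**2 + 8 * x - 4), 0.0))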
    params.target_neighborhood_scale = 2.2     # Target neighborhood to remove
    params.dispalcement_scale = 0.8            # Displacement to consider for distractors
    params.hard_negative_learning_rate = 0.02  # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5           # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True  # Update scale or not if distractor is close

    # IoUNet parameters
    params.use_iou_net = True            # Use IoU net or not
    params.iounet_augmentation = False   # Use the augmented samples to compute the modulation vector
    params.iounet_k = 3                  # Top-k average to estimate final box
    params.num_init_random_boxes = 9     # Num extra random boxes in addition to the classifier prediction
    params.box_jitter_pos = 0.1          # How much to jitter the translation for random boxes
    params.box_jitter_sz = 0.5           # How much to jitter the scale for random boxes
    params.maximal_aspect_ratio = 6      # Limit on the aspect ratio
    params.box_refinement_iter = 5       # Number of iterations for refining the boxes
    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement
    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)

    envs = EnvSettings()

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])
    deep_feat = deep.ATOMResNet18(net_path=envs.checkpoints_path, output_layers=['layer3'],
                                  fparams=deep_fparams, normalize_power=2)
    # Depth-input alternative:
    # deep_feat = deep.DepthResNet50(net_path='depth/depth/', output_layers=['layer3'],
    #                                fparams=deep_fparams, normalize_power=2)
    params.features = MultiResolutionExtractor([deep_feat])

    return params
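# Example usage: pytracking's run scripts call parameters() to build a preset;
# the returned TrackerParams is a plain attribute container, so any of the
# presets above is easy to tweak per run.
def _tweaked_params_example():
    params = parameters()
    params.debug = 1                              # more verbose output
    params.use_gpu = torch.cuda.is_available()    # fall back to CPU if needed
    return params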