Example #1
def load_network(net_path, **kwargs):
    """Load network for tracking.
    args:
        net_path - Path to network. If it is not an absolute path, it is relative to network_path in local.py.
                   See ltr.admin.loading.load_network for further details.
        **kwargs - Additional key-word arguments that are sent to ltr.admin.loading.load_network.
    """
    kwargs['backbone_pretrained'] = False
    if os.path.isabs(net_path):
        path_full = net_path
        net, _ = ltr_loading.load_network(path_full, **kwargs)
    elif isinstance(env_settings().network_path, (list, tuple)):
        net = None
        for p in env_settings().network_path:
            path_full = os.path.join(p, net_path)
            try:
                net, _ = ltr_loading.load_network(path_full, **kwargs)
                break
            except Exception:
                # Loading failed; try the next root in network_path
                pass

        assert net is not None, 'Failed to load network'
    else:
        path_full = os.path.join(env_settings().network_path, net_path)
        net, _ = ltr_loading.load_network(path_full, **kwargs)

    return net
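A minimal usage sketch for the loader above; the checkpoint filename is a placeholder, and path resolution follows the absolute/relative rules described in the docstring.

net = load_network('atom_default.pth')  # hypothetical checkpoint name
net.eval()  # typically switched to inference mode before tracking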
Example #2
    def initialize(self):
        if os.path.isabs(self.net_path):
            net_path_full = self.net_path
        else:
            net_path_full = os.path.join(env_settings().network_path, self.net_path)
        # Alternative: self.net, _ = load_network(net_path_full, backbone_pretrained=False)
        self.net = atom_resnet50(iou_input_dim=(512, 1024), iou_inter_dim=(256, 256), backbone_pretrained=False)
        # Alternative backbone: drnet_resnet50 with the same IoU-net dimensions
        self.net.load_state_dict(torch.load(net_path_full)['net'])
        if self.use_gpu:
            self.net.cuda()
        self.net.eval()

        self.iou_predictor = self.net.bb_regressor

        self.layer_stride = {'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32, 'classification': 16, 'fc': None}
        self.layer_dim = {'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512, 'classification': 256, 'fc': None}

        self.iounet_feature_layers = self.net.bb_regressor_layer

        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1]*len(self.output_layers)

        self.feature_layers = sorted(list(set(self.output_layers + self.iounet_feature_layers)))

        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1,-1,1,1)
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1,-1,1,1)
Example #3
def check_and_load_precomputed_results(trackers,
                                       dataset,
                                       report_name,
                                       force_evaluation=False,
                                       **kwargs):
    # Load data
    settings = env_settings()

    # Load pre-computed results
    result_plot_path = os.path.join(settings.result_plot_path, report_name)
    eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl')

    if os.path.isfile(eval_data_path) and not force_evaluation:
        with open(eval_data_path, 'rb') as fh:
            eval_data = pickle.load(fh)
    else:
        print('Pre-computed evaluation data not found. Computing results!')
        eval_data = extract_results(trackers, dataset, report_name, **kwargs)

    if not check_eval_data_is_valid(eval_data, trackers, dataset):
        print('Pre-computed evaluation data invalid. Re-computing results!')
        eval_data = extract_results(trackers, dataset, report_name, **kwargs)
    else:
        # Update display names
        tracker_names = [{
            'name': t.name,
            'param': t.parameter_name,
            'run_id': t.run_id,
            'disp_name': t.display_name
        } for t in trackers]
        eval_data['trackers'] = tracker_names

    return eval_data
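A sketch of how this cache-or-recompute helper might be called; the report name is a placeholder, and `trackers`/`dataset` come from the surrounding evaluation framework.

# Loads eval_data.pkl if present and structurally valid, otherwise recomputes
eval_data = check_and_load_precomputed_results(trackers, dataset, 'my_report')
# Ignore any cached pickle and evaluate from scratch
eval_data = check_and_load_precomputed_results(trackers, dataset, 'my_report',
                                               force_evaluation=True)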
Example #4
    def initialize(self):

        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1] * len(self.output_layers)

        self.layer_stride = {'vggconv1': 2, 'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32,
                             'fc': None}
        self.layer_dim = {'vggconv1': 96, 'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512,
                          'fc': None}

        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)

        if os.path.isabs(self.net_path):
            net_path_full = [self.net_path]
        else:
            root_paths = env_settings().network_path
            if isinstance(root_paths, str):
                root_paths = [root_paths]
            net_path_full = [os.path.join(root, self.net_path) for root in root_paths]

        self.net = None
        for net_path in net_path_full:
            try:
                self.net = resnet18_vggmconv1(self.output_layers, path=net_path)
                break
            except Exception:
                # Loading failed; try the next candidate path
                pass
        if self.net is None:
            raise Exception('Did not find network file {}'.format(self.net_path))

        if self.use_gpu:
            self.net.cuda()
        self.net.eval()
Example #5
def plot_got_success(trackers, report_name):
    """ Plot success plot for GOT-10k dataset using the json reports.
    Save the json reports from http://got-10k.aitestunion.com/leaderboard in the directory set to
    env_settings.got_reports_path

    The tracker name in the experiment file should be set to the name of the report file for that tracker,
    e.g. DiMP50_report_2019_09_02_15_44_25 if the report is named DiMP50_report_2019_09_02_15_44_25.json

    args:
        trackers - List of trackers to evaluate
        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved
    """
    # Load data
    settings = env_settings()
    plot_draw_styles = get_plot_draw_styles()

    result_plot_path = os.path.join(settings.result_plot_path, report_name)

    auc_curve = torch.zeros((len(trackers), 101))
    scores = torch.zeros(len(trackers))

    # Load results
    tracker_names = []
    for trk_id, trk in enumerate(trackers):
        json_path = '{}/{}.json'.format(settings.got_reports_path, trk.name)

        if os.path.isfile(json_path):
            with open(json_path, 'r') as f:
                eval_data = json.load(f)
        else:
            raise Exception('Report not found {}'.format(json_path))

        if len(eval_data.keys()) > 1:
            raise Exception('Expected a single tracker entry in report {}'.format(json_path))

        # First field is the tracker name. Index it out
        eval_data = eval_data[list(eval_data.keys())[0]]
        if 'succ_curve' in eval_data.keys():
            curve = eval_data['succ_curve']
            ao = eval_data['ao']
        elif 'overall' in eval_data.keys() and 'succ_curve' in eval_data['overall'].keys():
            curve = eval_data['overall']['succ_curve']
            ao = eval_data['overall']['ao']
        else:
            raise Exception('Invalid JSON file {}'.format(json_path))

        auc_curve[trk_id, :] = torch.tensor(curve) * 100.0
        scores[trk_id] = ao * 100.0

        tracker_names.append({'name': trk.name, 'param': trk.parameter_name, 'run_id': trk.run_id,
                              'disp_name': trk.display_name})

    threshold_set_overlap = torch.arange(0.0, 1.01, 0.01, dtype=torch.float64)

    success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',
                         'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'}
    plot_draw_save(auc_curve, threshold_set_overlap, scores, tracker_names, plot_draw_styles, result_plot_path,
                   success_plot_opts)
    plt.show()
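A hedged usage sketch: assuming the leaderboard JSON reports have been saved under env_settings().got_reports_path and each tracker name matches its report filename, the call reduces to the following (the parameter name is a placeholder).

# The tracker name must match the saved report's filename (without .json)
trackers = [Tracker('DiMP50_report_2019_09_02_15_44_25', 'dimp50')]
plot_got_success(trackers, 'got10k_report')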
Example #6
def pack_got10k_results(tracker_name, param_name, output_name):
    """ Packs got10k results into a zip folder which can be directly uploaded to the evaluation server. The packed
    file is saved in the folder env_settings().got_packed_results_path

    args:
        tracker_name - name of the tracker
        param_name - name of the parameter file
        output_name - name of the packed zip file
    """
    output_path = os.path.join(env_settings().got_packed_results_path,
                               output_name)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    results_path = env_settings().results_path
    for i in range(1, 181):
        seq_name = 'GOT-10k_Test_{:06d}'.format(i)

        seq_output_path = '{}/{}'.format(output_path, seq_name)
        if not os.path.exists(seq_output_path):
            os.makedirs(seq_output_path)

        for run_id in range(3):
            res = np.loadtxt('{}/{}/{}_{:03d}/{}.txt'.format(
                results_path, tracker_name, param_name, run_id, seq_name),
                             dtype=np.float64)
            times = np.loadtxt('{}/{}/{}_{:03d}/{}_time.txt'.format(
                results_path, tracker_name, param_name, run_id, seq_name),
                               dtype=np.float64)

            np.savetxt('{}/{}_{:03d}.txt'.format(seq_output_path, seq_name,
                                                 run_id + 1),
                       res,
                       delimiter=',',
                       fmt='%f')
            np.savetxt('{}/{}_time.txt'.format(seq_output_path, seq_name),
                       times,
                       fmt='%f')

    # Generate ZIP file
    shutil.make_archive(output_path, 'zip', output_path)

    # Remove raw text files
    shutil.rmtree(output_path)
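For illustration, a call with placeholder tracker and parameter names; the function expects three completed runs (ids 000-002) for each of the 180 GOT-10k test sequences.

# Produces <got_packed_results_path>/upload_got10k.zip and removes the raw text files
pack_got10k_results('dimp', 'dimp50', 'upload_got10k')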
Example #7
def pack_trackingnet_results(tracker_name, param_name, run_id=None, output_name=None):
    """ Packs trackingnet results into a zip folder which can be directly uploaded to the evaluation server. The packed
    file is saved in the folder env_settings().tn_packed_results_path

    args:
        tracker_name - name of the tracker
        param_name - name of the parameter file
        run_id - run id for the tracker
        output_name - name of the packed zip file
    """

    if output_name is None:
        if run_id is None:
            output_name = '{}_{}'.format(tracker_name, param_name)
        else:
            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name, run_id)
    if env_settings().packed_results_path == '':
        raise RuntimeError('packed_results_path is not set in local.py. '
                           'Go to "pytracking.evaluation.local" to set the path, then run again.')
    output_path = os.path.join(env_settings().packed_results_path, tracker_name, output_name)

    results_path = env_settings().results_path

    tn_dataset = TrackingNetDataset()

    for seq in tn_dataset:
        seq_name = seq.name
        if run_id is None:
            seq_results_path = '{}/{}/{}/{}.txt'.format(results_path, tracker_name, param_name, seq_name)
        else:
            seq_results_path = '{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name)

        results = np.loadtxt(seq_results_path, dtype=np.float64)
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        np.savetxt('{}/{}.txt'.format(output_path, seq_name), results, delimiter=',', fmt='%.2f')

    # Generate ZIP file
    shutil.make_archive(output_path, 'zip', output_path)

    # Remove raw text files
    shutil.rmtree(output_path)
Example #8
def pack_trackingnet_results(tracker_name, param_name, run_id=None, output_name=None):
    """ Packs trackingnet results into a zip folder which can be directly uploaded to the evaluation server. The packed
    file is saved in the folder env_settings().tn_packed_results_path

    args:
        tracker_name - name of the tracker
        param_name - name of the parameter file
        run_id - run id for the tracker
        output_name - name of the packed zip file
    """

    if output_name is None:
        if run_id is None:
            output_name = '{}_{}'.format(tracker_name, param_name)
        else:
            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name, run_id)

    output_path = os.path.join(env_settings().tn_packed_results_path, output_name)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    results_path = env_settings().results_path

    tn_dataset = get_dataset('trackingnet')

    for seq in tn_dataset:
        seq_name = seq.name

        if run_id is None:
            seq_results_path = '{}/{}/{}/{}.txt'.format(results_path, tracker_name, param_name, seq_name)
        else:
            seq_results_path = '{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name)

        results = np.loadtxt(seq_results_path, dtype=np.float64)

        np.savetxt('{}/{}.txt'.format(output_path, seq_name), results, delimiter=',', fmt='%.2f')

    # Generate ZIP file
    shutil.make_archive(output_path, 'zip', output_path)

    # Remove raw text files
    shutil.rmtree(output_path)
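A usage sketch with placeholder names; when run_id is given, the archive name defaults to '<tracker>_<param>_<run_id>'.

# Packs results from '<results_path>/dimp/dimp50_000/' into 'dimp_dimp50_000.zip'
pack_trackingnet_results('dimp', 'dimp50', run_id=0)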
Example #9
def load_network(net_path):
    if os.path.isabs(net_path):
        path_full = net_path
        net, _ = ltr_loading.load_network(path_full, backbone_pretrained=False)
    elif isinstance(env_settings().network_path, (list, tuple)):
        net = None
        for p in env_settings().network_path:
            path_full = os.path.join(p, net_path)
            try:
                net, _ = ltr_loading.load_network(path_full, backbone_pretrained=False)
                break
            except Exception:
                # Loading failed; try the next root in network_path
                pass

        assert net is not None, 'Failed to load network'
    else:
        path_full = os.path.join(env_settings().network_path, net_path)
        net, _ = ltr_loading.load_network(path_full, backbone_pretrained=False)

    return net
Example #10
def main(tracker_name, param_name, run_id=None, output_name=None):

    if output_name is None:
        if run_id is None:
            output_name = '{}_{}'.format(tracker_name, param_name)
        else:
            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name,
                                                run_id)
    if env_settings().packed_results_path == '':
        raise RuntimeError(
            'packed_results_path is not set in local.py. '
            'Go to "pytracking.evaluation.local" to set the path, then run again.')
    output_path = os.path.join(env_settings().packed_results_path,
                               tracker_name, output_name)
    got_results_dir = os.path.join(env_settings().packed_results_path,
                                   tracker_name, param_name)

    # Move each '<seq>_time.txt' and its matching '<seq>.txt' into a per-sequence folder
    files = os.listdir(got_results_dir)
    for f in files:
        if f.split('.')[0][-4:] == 'time':
            prefix = f.split('.')[0][:-5]
            os.makedirs(os.path.join(got_results_dir, prefix))
            shutil.move(os.path.join(got_results_dir, f),
                        os.path.join(got_results_dir, prefix, f))
            shutil.move(os.path.join(got_results_dir, prefix + '.txt'),
                        os.path.join(got_results_dir, prefix, prefix + '.txt'))

    # Rename each result file to the '<seq>_001.txt' format expected by the server
    seq_dirs = os.listdir(got_results_dir)
    for p in seq_dirs:
        for file in os.listdir(os.path.join(got_results_dir, p)):
            if file.split('.')[0][-1] != 'e':  # skip the '*_time.txt' files
                os.rename(
                    os.path.join(got_results_dir, p, file),
                    os.path.join(got_results_dir, p,
                                 file.split('.')[0] + '_001.txt'))

    shutil.make_archive(output_path, 'zip', got_results_dir)
Example #11
    def initialize(self):
        if os.path.isabs(self.net_path):
            net_path_full = self.net_path
        else:
            net_path_full = os.path.join(env_settings().network_path,
                                         self.net_path)

        self.net, _ = load_network(net_path_full, backbone_pretrained=False)

        self.net.cuda()
        self.net.eval()

        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1,
                                                             1).cuda()
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1).cuda()

        self.layer_stride = {
            'conv1': 2,
            'layer1': 4,
            'layer2': 8,
            'layer3': 16,
            'layer4': 32,
            'classification': 16,
            'fc': None
        }
        self.layer_dim = {
            'conv1': 64,
            'layer1': 64,
            'layer2': 128,
            'layer3': 256,
            'layer4': 512,
            'classification': 256,
            'fc': None
        }

        self.iou_predictor = self.net.bb_regressor
        self.location_predictor = self.net.location_predictor

        # Hard-coded instead of self.net.bb_regressor_layer / self.net.location_predictor_layer
        self.iounet_feature_layers = ('layer2', 'layer3')
        self.locator_feature_layers = ('layer2', 'layer3')
        self.backbone_feature_layers = sorted(
            list(set(self.iounet_feature_layers +
                     self.locator_feature_layers)))

        # Freeze all network parameters; no gradients are needed at tracking time
        for p in self.net.parameters():
            p.requires_grad = False
Example #12
    def initialize(self, im):
        if os.path.isabs(self.net_path):
            net_path_full = self.net_path
        else:
            net_path_full = os.path.join(env_settings().network_path,
                                         self.net_path)

        if isinstance(self.pool_stride, int) and self.pool_stride == 1:
            self.pool_stride = [1] * len(self.output_layers)

        self.layer_stride = {
            'vggconv1': 2,
            'conv1': 2,
            'layer1': 4,
            'layer2': 8,
            'layer3': 16,
            'layer4': 32,
            'fc': None
        }
        self.layer_dim = {
            'vggconv1': 96,
            'conv1': 64,
            'layer1': 64,
            'layer2': 128,
            'layer3': 256,
            'layer4': 512,
            'fc': None
        }

        # Normalize with per-image statistics instead of the fixed ImageNet mean/std
        im = im / 255
        self.mean = torch.Tensor([
            torch.mean(im[:, 0, ...]),
            torch.mean(im[:, 1, ...]),
            torch.mean(im[:, 2, ...])
        ]).view(1, -1, 1, 1)
        self.std = torch.Tensor([
            torch.std(im[:, 0, ...]),
            torch.std(im[:, 1, ...]),
            torch.std(im[:, 2, ...])
        ]).view(1, -1, 1, 1)

        self.net = resnet18_vggmconv1(self.output_layers, path=net_path_full)
        if self.use_gpu:
            if self.gpu_device is not None:
                self.net.cuda(self.gpu_device)
            else:
                self.net.cuda()
        self.net.eval()
Example #13
    def __init__(self,
                 name: str,
                 parameter_name: str,
                 run_id: int = None,
                 display_name: str = None,
                 if_rt: int = 0):
        assert run_id is None or isinstance(run_id, int)

        self.name = name
        self.parameter_name = parameter_name
        self.run_id = run_id
        self.display_name = display_name
        self.if_rt = if_rt

        env = env_settings()
        if self.run_id is None:
            self.results_dir = '{}/{}/{}'.format(env.results_path, self.name,
                                                 self.parameter_name)
            self.results_dir_rt = '{}/{}/{}'.format(env.results_path_rt,
                                                    self.name,
                                                    self.parameter_name)
            self.segmentation_dir = '{}/{}/{}'.format(env.segmentation_path,
                                                      self.name,
                                                      self.parameter_name)
        else:
            self.results_dir = '{}/{}/{}_{:03d}'.format(
                env.results_path, self.name, self.parameter_name, self.run_id)
            self.results_dir_rt = '{}/{}/{}_{:03d}'.format(
                env.results_path_rt, self.name, self.parameter_name,
                self.run_id)
            self.segmentation_dir = '{}/{}/{}_{:03d}'.format(
                env.segmentation_path, self.name, self.parameter_name,
                self.run_id)

        tracker_module_abspath = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'tracker',
                         self.name))
        if os.path.isdir(tracker_module_abspath):
            tracker_module = importlib.import_module(
                'pytracking.tracker.{}'.format(self.name))
            self.tracker_class = tracker_module.get_tracker_class()
        else:
            self.tracker_class = None

        self.visdom = None
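A construction sketch with placeholder names; run_id selects the '<param>_000'-style results directory and display_name only affects plot legends.

trk = Tracker('dimp', 'dimp50', run_id=0, display_name='DiMP-50')
print(trk.results_dir)  # <results_path>/dimp/dimp50_000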
Example #14
    def __init__(self, name: str, parameter_name: str, run_id: int = None):
        self.name = name
        self.parameter_name = parameter_name
        self.run_id = run_id

        env = env_settings()
        if self.run_id is None:
            self.results_dir = '{}/{}/{}'.format(env.results_path, self.name,
                                                 self.parameter_name)
        else:
            self.results_dir = '{}/{}/{}_{:03d}'.format(
                env.results_path, self.name, self.parameter_name, self.run_id)
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)

        tracker_module = importlib.import_module(
            'pytracking.tracker.{}'.format(self.name))
        self.tracker_class = tracker_module.get_tracker_class()
Example #15
    def __init__(self, name: str, parameter_name: str, run_id: int = None):
        self.name = name
        self.parameter_name = parameter_name
        self.run_id = run_id

        env = env_settings()
        self.results_dir = '{}/{}/{}'.format(env.results_path, self.name,
                                             self.parameter_name)
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)

        tracker_module = importlib.import_module(
            'pytracking.tracker.{}'.format(self.name))

        self.parameters = self.get_parameters()
        self.tracker_class = tracker_module.get_tracker_class()

        self.default_visualization = getattr(self.parameters, 'visualization',
                                             False)
        self.default_debug = getattr(self.parameters, 'debug', 0)
Example #16
def unpack_tracking_results(download_path, output_path=None):
    """
    Unpacks zipped benchmark results. The directory 'download_path' should have the following structure
    - root
        - tracker1
            - param1.zip
            - param2.zip
            .
            .
        - tracker2
            - param1.zip
            - param2.zip
        .
        .

    args:
        download_path - Path to the directory where the zipped results are stored
        output_path - Path to the directory where the results will be unpacked. Set to env_settings().results_path
                      by default
    """

    if output_path is None:
        output_path = env_settings().results_path

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    trackers = os.listdir(download_path)

    for t in trackers:
        runfiles = os.listdir(os.path.join(download_path, t))

        for r in runfiles:
            save_path = os.path.join(output_path, t)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            shutil.unpack_archive(os.path.join(download_path, t, r),
                                  os.path.join(save_path, r[:-4]), 'zip')
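A usage sketch; the download directory is a placeholder, and output defaults to env_settings().results_path.

# Unpacks every '<tracker>/<param>.zip' found under the download directory
unpack_tracking_results('/path/to/downloaded_results')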
Example #17
    def initialize(self):
        if os.path.isabs(self.net_path):
            net_path_full = self.net_path
        else:
            net_path_full = os.path.join(env_settings().network_path,
                                         self.net_path)

        self.net, _ = load_network(net_path_full, backbone_pretrained=False)

        self.net.cuda()
        self.net.eval()

        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1,
                                                             1).cuda()
        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1).cuda()

        self.iou_predictor = self.net.bb_regressor
        self.iou_feature_extractor = self.net.bb_regressor.get_iou_feat
        self.locator_feature_extractor = self.net.location_predictor.get_locator_feat

        # Freeze all network parameters; no gradients are needed at tracking time
        for p in self.net.parameters():
            p.requires_grad = False
Example #18
def plot_results(trackers,
                 dataset,
                 report_name,
                 merge_results=False,
                 plot_types=('success',),
                 force_evaluation=False,
                 **kwargs):
    """
    Plot results for the given trackers

    args:
        trackers - List of trackers to evaluate
        dataset - List of sequences to evaluate
        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved
        merge_results - If True, multiple random runs for non-deterministic trackers are averaged
        plot_types - List of scores to display. Can contain 'success',
                    'prec' (precision), and 'norm_prec' (normalized precision)
    """
    # Load data
    settings = env_settings()

    plot_draw_styles = get_plot_draw_styles()

    # Load pre-computed results
    result_plot_path = os.path.join(settings.result_plot_path, report_name)
    eval_data = check_and_load_precomputed_results(trackers, dataset,
                                                   report_name,
                                                   force_evaluation, **kwargs)

    # Merge results from multiple runs
    if merge_results:
        eval_data = merge_multiple_runs(eval_data)

    tracker_names = eval_data['trackers']

    valid_sequence = torch.tensor(eval_data['valid_sequence'],
                                  dtype=torch.bool)

    print('\nPlotting results over {} / {} sequences'.format(
        valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    print('\nGenerating plots for: {}'.format(report_name))

    # ********************************  Success Plot **************************************
    if 'success' in plot_types:
        ave_success_rate_plot_overlap = torch.tensor(
            eval_data['ave_success_rate_plot_overlap'])

        # Index out valid sequences
        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap,
                                       valid_sequence)
        threshold_set_overlap = torch.tensor(
            eval_data['threshold_set_overlap'])

        success_plot_opts = {
            'plot_type': 'success',
            'legend_loc': 'lower left',
            'xlabel': 'Overlap threshold',
            'ylabel': 'Overlap Precision [%]',
            'xlim': (0, 1.0),
            'ylim': (0, 100),
            'title': 'Success plot'
        }
        plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names,
                       plot_draw_styles, result_plot_path, success_plot_opts)

    # ********************************  Precision Plot **************************************
    if 'prec' in plot_types:
        ave_success_rate_plot_center = torch.tensor(
            eval_data['ave_success_rate_plot_center'])

        # Index out valid sequences
        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center,
                                                valid_sequence)
        threshold_set_center = torch.tensor(eval_data['threshold_set_center'])

        precision_plot_opts = {
            'plot_type': 'precision',
            'legend_loc': 'lower right',
            'xlabel': 'Location error threshold [pixels]',
            'ylabel': 'Distance Precision [%]',
            'xlim': (0, 50),
            'ylim': (0, 100),
            'title': 'Precision plot'
        }
        plot_draw_save(prec_curve, threshold_set_center, prec_score,
                       tracker_names, plot_draw_styles, result_plot_path,
                       precision_plot_opts)

    # ********************************  Norm Precision Plot **************************************
    if 'norm_prec' in plot_types:
        ave_success_rate_plot_center_norm = torch.tensor(
            eval_data['ave_success_rate_plot_center_norm'])

        # Index out valid sequences
        prec_curve, prec_score = get_prec_curve(
            ave_success_rate_plot_center_norm, valid_sequence)
        threshold_set_center_norm = torch.tensor(
            eval_data['threshold_set_center_norm'])

        norm_precision_plot_opts = {
            'plot_type': 'norm_precision',
            'legend_loc': 'lower right',
            'xlabel': 'Location error threshold',
            'ylabel': 'Distance Precision [%]',
            'xlim': (0, 0.5),
            'ylim': (0, 100),
            'title': 'Normalized Precision plot'
        }
        plot_draw_save(prec_curve, threshold_set_center_norm, prec_score,
                       tracker_names, plot_draw_styles, result_plot_path,
                       norm_precision_plot_opts)

    plt.show()
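A usage sketch with a placeholder report name, drawing success and precision plots while averaging repeated runs.

plot_results(trackers, dataset, 'my_report', merge_results=True,
             plot_types=('success', 'prec'))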
Example #19
def extract_results(trackers, dataset, report_name, skip_missing_seq=False, plot_bin_gap=0.05,
                    exclude_invalid_frames=False):
    settings = env_settings()
    eps = 1e-16

    result_plot_path = os.path.join(settings.result_plot_path, report_name)

    if not os.path.exists(result_plot_path):
        os.makedirs(result_plot_path)

    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)
    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)
    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0

    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)
    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),
                                                dtype=torch.float32)
    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                               dtype=torch.float32)
    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center_norm.numel()),
                                                    dtype=torch.float32)

    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)

    for seq_id, seq in enumerate(tqdm(dataset)):
        # Load anno
        anno_bb = torch.tensor(seq.ground_truth_rect)
        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None
        for trk_id, trk in enumerate(trackers):
            # Load results
            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)
            if report_name == 'nfs' and trk.results_dir.split('/')[-2] in ('atom', 'ECO', 'UPDT', 'MDNet', 'CCOT'):
                base_results_path = '{}/nfs_{}'.format(trk.results_dir, seq.name)
                
            results_path = '{}.txt'.format(base_results_path)

            if os.path.isfile(results_path):
                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\t', ','), dtype=np.float64))
            else:
                if skip_missing_seq:
                    valid_sequence[seq_id] = 0
                    break
                else:
                    raise Exception('Result not found. {}'.format(results_path))

            # Calculate measures
            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(
                pred_bb, anno_bb, seq.dataset, target_visible)

            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()

            if exclude_invalid_frames:
                seq_length = valid_frame.long().sum()
            else:
                seq_length = anno_bb.shape[0]

            if seq_length <= 0:
                raise Exception('Seq length zero')

            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length
            ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length
            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length

    print('\n\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    # Prepare dictionary for saving data
    seq_names = [s.name for s in dataset]
    tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}
                     for t in trackers]

    eval_data = {'sequences': seq_names, 'trackers': tracker_names,
                 'valid_sequence': valid_sequence.tolist(),
                 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(),
                 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(),
                 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(),
                 'avg_overlap_all': avg_overlap_all.tolist(),
                 'threshold_set_overlap': threshold_set_overlap.tolist(),
                 'threshold_set_center': threshold_set_center.tolist(),
                 'threshold_set_center_norm': threshold_set_center_norm.tolist()}

    with open(result_plot_path + '/eval_data.pkl', 'wb') as fh:
        pickle.dump(eval_data, fh)

    return eval_data
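A usage sketch; setting skip_missing_seq marks sequences without result files as invalid instead of raising.

eval_data = extract_results(trackers, dataset, 'my_report', skip_missing_seq=True)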
Example #20
def parameters(pth_path=None):
    params = TrackerParams()

    # These are usually set from outside
    params.debug = 1                        # Debug level
    params.visualization = True            # Do visualization

    # Use GPU or not (IoUNet requires this to be True)
    params.use_gpu = True

    # Feature specific parameters
    deep_params = TrackerParams()

    # Patch sampling parameters
    params.max_image_sample_size = (16 * 16) ** 2  # (18 * 16) ** 2   # Maximum image sample size
    params.min_image_sample_size = (16 * 16) ** 2  # (18 * 16) ** 2   # Minimum image sample size
    params.search_area_scale = 4.5                    # Scale relative to target size
    params.feature_size_odd = False                 # Good to use False for even-sized kernels and vice versa

    # Optimization parameters
    params.CG_iter = 5                    # The number of Conjugate Gradient iterations in each update after the first frame
    params.init_CG_iter = 60              # The total number of Conjugate Gradient iterations used in the first frame
    params.init_GN_iter = 6               # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)
    params.post_init_CG_iter = 0          # CG iterations to run after GN
    params.fletcher_reeves = False        # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient
    params.standard_alpha = True          # Use the standard formula for computing the step length in Conjugate Gradient
    params.CG_forgetting_rate = None	  # Forgetting rate of the last conjugate direction

    # Learning parameters for each feature type
    deep_params.learning_rate = 0.0075           # Learning rate
    deep_params.output_sigma_factor = 1/4        # Standard deviation of Gaussian label relative to target size

    # Training parameters
    params.sample_memory_size = 250              # Memory size
    params.train_skipping = 10                   # How often to run training (every n-th frame)

    # Online model parameters
    deep_params.kernel_size = (4, 4)             # Kernel size of filter
    deep_params.compressed_dim = 64              # Dimension output of projection matrix
    deep_params.filter_reg = 1e-1                # Filter regularization factor
    deep_params.projection_reg = 1e-4            # Projection regularization factor

    # Windowing
    params.feature_window = False                # Perform windowing of features
    params.window_output = True                  # Perform windowing of output scores

    # Detection parameters
    params.scale_factors = torch.ones(1)        # What scales to use for localization (only one scale if IoUNet is used)
    params.score_upsample_factor = 1            # How much Fourier upsampling to use

    # Init data augmentation parameters
    params.augmentation = {'fliplr': True,
                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],
                           'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],
                           'relativeshift': [(0.25, 0.25), (-0.25, 0.25), (0.25, -0.25), (-0.25, -0.25), (0.75, 0.75), (-0.75, 0.75), (0.75, -0.75), (-0.75, -0.75)]}

    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation
    params.random_shift_factor = 0              # How much random shift to do on each augmented sample (disabled; was 1/3)
    deep_params.use_augmentation = True         # Whether to use augmentation for this feature

    # Factorized convolution parameters
    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation
    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not
    params.proj_init_method = 'pca'             # Method for initializing the projection matrix  randn | pca
    params.filter_init_method = 'zeros'         # Method for initializing the spatial filter  randn | zeros
    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')
    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')

    # Advanced localization parameters
    params.advanced_localization = True         # Use advanced (distractor-aware) localization or not
    params.target_not_found_threshold = -1      # Absolute score threshold to detect target missing
    params.distractor_threshold = 100           # Relative threshold to find distractors
    params.hard_negative_threshold = 0.3        # Relative threshold to find hard negative samples
    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove
    params.dispalcement_scale = 0.7             # Displacement to consider for distractors
    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected
    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected
    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close

    # Setup the feature extractor (which includes the IoUNet)
    deep_fparams = FeatureParams(feature_params=[deep_params])

    # use ResNet50 for filter
    params.use_resnet50 = True
    if params.use_resnet50:
        deep_feat_filter = deep.ATOMResNet50(output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
        # Alternative: deep.DRNetSE50(net_path='SE_Res50.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)
        params.features_filter = MultiResolutionExtractor([deep_feat_filter])

    params.vot_anno_conversion_type = 'preserve_area'

    params.use_segmentation = True

    env = env_settings()
    net_path = env.network_path

    if pth_path is None:
        pth_path = '/home/jaffe/PycharmProjects//DMB/pytracking/networks/recurrent25.pth.tar'

    params.pth_path = pth_path
    params.segm_use_dist = True
    params.segm_normalize_mean = [0.485, 0.456, 0.406]
    params.segm_normalize_std = [0.229, 0.224, 0.225]
    params.segm_search_area_factor = 4.0
    params.segm_feature_sz = 24
    params.segm_output_sz = params.segm_feature_sz * 16
    params.segm_scale_estimation = True
    params.segm_optimize_polygon = True

    params.tracking_uncertainty_thr = 3
    params.response_budget_sz = 25
    params.uncertainty_segm_scale_thr = 3.5
    params.uncertainty_segment_thr = 10
    params.segm_pixels_ratio = 2
    params.mask_pixels_budget_sz = 25
    params.segm_min_scale = 0.2
    params.max_rel_scale_ch_thr = 0.75
    params.consider_segm_pixels_ratio = 1
    params.opt_poly_overlap_thr = 0.3
    params.poly_cost_a = 1.2
    params.poly_cost_b = 1
    params.segm_dist_map_type = 'center'  # center | bbox
    params.min_scale_change_factor = 0.95
    params.max_scale_change_factor = 1.05
    params.init_segm_mask_thr = 0.5
    params.segm_mask_thr = 0.5

    params.masks_save_path = ''
    # params.masks_save_path = 'save-masks-path'
    params.save_mask = False
    if params.masks_save_path != '':
        params.save_mask = True

    return params
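A usage sketch; the default falls back to the hard-coded checkpoint above, while a custom path can be passed explicitly (the path shown is a placeholder).

params = parameters()  # use the default hard-coded segmentation checkpoint
params = parameters('/path/to/segmentation_net.pth.tar')  # or a custom one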
Example #21
    def __init__(self):
        self.env_settings = env_settings()
Example #22
    def __init__(self, name, frames, ground_truth_rect):
        self.name = name
        self.frames = frames
        self.ground_truth_rect = ground_truth_rect
        self.env_settings = env_settings()
        self.pred_trajs = {}