Example #1
    def __init__(self, model_path):
        """Load a SiameseAlexNet checkpoint and precompute tracking anchors.

        Args:
            model_path: path to a ``torch.save``-ed checkpoint; either a raw
                state dict, or a dict holding the state dict under 'model'.
        """
        self.model = SiameseAlexNet()
        # Load the checkpoint once and reuse it; the original re-read the
        # file from disk (torch.load) a second time in each branch.
        checkpoint = torch.load(model_path)
        state_dict = checkpoint['model'] if 'model' in checkpoint else checkpoint
        self.model.load_state_dict(state_dict)
        self.model = self.model.cuda()
        self.model.eval()
        self.transforms = transforms.Compose([
            ToTensor()
        ])

        # Anchors over the valid response-map region.
        valid_scope = 2 * config.valid_scope + 1
        self.anchors = generate_anchors(config.total_stride, config.anchor_base_size,
                                        config.anchor_scales, config.anchor_ratios,
                                        valid_scope)
        # Hanning (cosine) penalty window, replicated per anchor and flattened
        # to match the flattened score map.
        self.window = np.tile(np.outer(np.hanning(config.score_size), np.hanning(config.score_size))[None, :],
                              [config.anchor_num, 1, 1]).flatten()
Example #2
 def __init__(self, scales, ratios, feat_stride, anchor_per_sample=128):
     """Build the per-cell anchor set and record train/test hyper-parameters."""
     super(AnchorProposal, self).__init__()
     self.feat_stride = feat_stride
     # Anchor templates generated from the given stride, ratios and scales.
     self.anchors = generate_anchors(feat_stride, np.array(ratios), np.array(scales))
     self.num_anchors = self.anchors.shape[0]
     self.num_category = 3

     # --- training-time settings ---
     self.allowed_border = 0
     self.anchor_negative_overlap = 0.4
     self.anchor_positive_overlap = 0.6
     self.anchor_fg_fraction = 0.25
     self.anchor_per_sample = anchor_per_sample
     self.hard_mining = True

     # --- inference-time settings ---
     self.num_det_per_category = 64
     self.min_size = 0
     self.nms_th = 0.3
     self.score_th = 0.6
Example #3
    def __init__(self,
                 db,
                 video_names,
                 data_dir,
                 z_transforms,
                 x_transforms,
                 training=True):
        """Dataset of (exemplar, instance) training pairs backed by a db.

        Args:
            db: database handle exposing ``begin(write=False)`` (LMDB-style).
            video_names: sequence of video identifiers to sample from.
            data_dir: directory containing 'meta_data.pkl'.
            z_transforms: transforms applied to the exemplar crop.
            x_transforms: transforms applied to the instance crop.
            training: when True, epoch length is scaled by
                ``config.pairs_per_video_per_epoch`` (if set).
        """
        self.video_names = video_names
        self.data_dir = data_dir
        self.z_transforms = z_transforms
        self.x_transforms = x_transforms
        meta_data_path = os.path.join(data_dir, 'meta_data.pkl')
        # Use a context manager so the pickle file is closed deterministically
        # (the original `pickle.load(open(...))` leaked the file handle).
        with open(meta_data_path, 'rb') as meta_file:
            self.meta_data = pickle.load(meta_file)
        self.meta_data = {x[0]: x[1] for x in self.meta_data}
        # Drop trajectories with fewer than 2 frames: a pair needs two frames.
        for trajs in self.meta_data.values():
            for trkid in list(trajs.keys()):
                if len(trajs[trkid]) < 2:
                    del trajs[trkid]

        self.txn = db.begin(write=False)
        self.num = len(self.video_names) if config.pairs_per_video_per_epoch is None or not training \
            else config.pairs_per_video_per_epoch * len(self.video_names)

        # data augmentation
        self.max_stretch = config.scale_resize
        self.max_translate = config.max_translate
        self.random_crop_size = config.instance_size
        self.center_crop_size = config.exemplar_size

        self.training = training

        valid_scope = 2 * config.valid_scope + 1
        self.anchors = generate_anchors(config.total_stride,
                                        config.anchor_base_size,
                                        config.anchor_scales,
                                        config.anchor_ratios, valid_scope)