Example No. 1
def main():
    global args, v_id
    args = parser.parse_args()

    # build the OTB-tuned SiamRPN network and load its weights from the script directory
    net = SiamRPNotb()
    net.load_state_dict(torch.load(join(realpath(dirname(__file__)), 'SiamRPNOTB.model')))
    net.eval().cuda()

    # run the tracker on every sequence of the benchmark and report the average speed
    dataset = load_dataset(args.dataset)
    fps_list = []
    for v_id, video in enumerate(dataset.keys()):
        fps_list.append(track_video(net, dataset[video]))
    print('Mean Running Speed {:.1f}fps'.format(np.mean(np.array(fps_list))))
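
Note that main() relies on a module-level argparse parser (with at least a --dataset option) and on the helpers load_dataset and track_video defined elsewhere in the same script. A minimal sketch of the assumed parser setup, with an illustrative default:

import argparse

# Hypothetical module-level parser assumed by main(); option names and defaults are illustrative.
parser = argparse.ArgumentParser(description='SiamRPN tracking evaluation')
parser.add_argument('--dataset', default='OTB2015', help='benchmark name passed to load_dataset()')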
Example No. 2
def SiamRPN_load(image, boxes, txt_path):
    # make sure the output directory exists
    if not os.path.exists(txt_path):
        os.makedirs(txt_path)
    multiTracker = cv2.MultiTracker_create()  # created but not used below
    net = SiamRPNotb()
    net.load_state_dict(
        torch.load(join(realpath(dirname(__file__)), 'SiamRPNOTB.model')))
    net.eval().cuda()
    states, labels = [], []
    for bbox in boxes:
        # boxes are given as [x1, y1, x2, y2, ..., label]; build the four corner points
        # (the commented line below is the [x, y, w, h] variant of the same conversion)
        #init_rbox = [bbox[0],bbox[1],bbox[0]+bbox[2],bbox[1],bbox[0]+bbox[2],bbox[1]+bbox[3],bbox[0],bbox[1]+bbox[3]]
        init_rbox = [
            bbox[0], bbox[1], bbox[2], bbox[1], bbox[2], bbox[3], bbox[0],
            bbox[3]
        ]
        # reduce the corner list to an axis-aligned centre/size box
        [cx, cy, w, h] = get_axis_aligned_bbox(init_rbox)
        # print(cx, cy, w, h,'-----',init_rbox)
        target_pos, target_sz = np.array([cx, cy]), np.array([w, h])
        # one independent SiamRPN state per box, all sharing the same network
        states.append(SiamRPN_init(image, target_pos, target_sz, net))
        labels.append(bbox[-1])
    return states, labels
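
SiamRPN_load only initializes one tracker state per box; advancing those states on later frames is left to the caller. A sketch of such a loop, assuming frames is an iterable of BGR images and using the same SiamRPN_track API as the other examples:

# Hypothetical per-frame update of the states returned by SiamRPN_load.
for frame in frames:
    for i, state in enumerate(states):
        states[i] = SiamRPN_track(state, frame)    # update this object's tracker state
        cx, cy = states[i]['target_pos']           # predicted box centre
        w, h = states[i]['target_sz']              # predicted box size
        x1, y1 = cx - w / 2.0, cy - h / 2.0        # corner-format box if needed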
Example No. 3
class DaSiamRPN(BaseTracker):
    def __init__(self):
        super(DaSiamRPN, self).__init__(name="DaSiamRPN")
        self.net_file = path_config.DASIAMRPN_MODEL

    def initialize(self, image_file, box):
        self.net = SiamRPNotb()
        self.net.load_state_dict(torch.load(self.net_file))
        self.net.eval().cuda()

        image = cv2.imread(image_file)
        # shift the 1-based [x, y, w, h] annotation to 0-based pixel coordinates
        box = box - np.array([1, 1, 0, 0])
        self.state = SiamRPN_init(image, box[:2] + box[2:] / 2.0, box[2:],
                                  self.net)  # init tracker from centre and size

    def track(self, image_file):
        image = cv2.imread(image_file)
        self.state = SiamRPN_track(self.state, image)  # track
        center = self.state["target_pos"] + 1  # back to the 1-based convention
        target_sz = self.state["target_sz"]
        box = cxy_wh_2_rect(center, target_sz)
        return box
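
A short driver for the wrapper class above; the frame paths and the initial [x, y, w, h] box are placeholders:

# Hypothetical usage of the DaSiamRPN wrapper; file names and box values are made up.
tracker = DaSiamRPN()
tracker.initialize('frames/0001.jpg', np.array([120, 80, 64, 48]))  # 1-based [x, y, w, h]
for image_file in ('frames/0002.jpg', 'frames/0003.jpg'):
    box = tracker.track(image_file)  # [x, y, w, h] prediction for this frame
    print(box)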
Example No. 4
import torch
from net import SiamRPNotb

net = SiamRPNotb()
net.load_state_dict(torch.load('SiamRPNOTB.model'))

params = {}
conv_layer_index = 1
# gamma is the BatchNorm scale, beta is the BatchNorm bias
for layer in net.featureExtract:
    if isinstance(layer, torch.nn.Conv2d):
        # PyTorch stores conv weights as (out, in, kH, kW); transpose to TensorFlow's (kH, kW, in, out)
        params['conv%d/weights' % conv_layer_index] = layer.weight.data.numpy().transpose(2, 3, 1, 0)
        params['conv%d/biases' % conv_layer_index] = layer.bias.data.numpy()
    elif isinstance(layer, torch.nn.BatchNorm2d):
        params['bn%d/moving_mean' % conv_layer_index] = layer.running_mean.numpy()
        params['bn%d/moving_variance' % conv_layer_index] = layer.running_var.numpy()
        params['bn%d/gamma' % conv_layer_index] = layer.weight.data.numpy()
        params['bn%d/beta' % conv_layer_index] = layer.bias.data.numpy()
        conv_layer_index += 1
    else:
        print("There are no parameters in layer: ", layer)

# RPN head: regression branch (conv_r1, conv_r2) and classification branch (conv_cls1)
params['conv_r1/weights'] = net.conv_r1.weight.data.numpy().transpose(2, 3, 1, 0)
params['conv_r1/biases'] = net.conv_r1.bias.data.numpy()

params['conv_r2/weights'] = net.conv_r2.weight.data.numpy().transpose(2, 3, 1, 0)
params['conv_r2/biases'] = net.conv_r2.bias.data.numpy()

params['conv_cls1/weights'] = net.conv_cls1.weight.data.numpy().transpose(2, 3, 1, 0)
params['conv_cls1/biases'] = net.conv_cls1.bias.data.numpy()
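
The dictionary built above holds plain NumPy arrays already transposed into TensorFlow's HWIO layout; one simple way to hand it to a separate conversion script is to pickle it through NumPy (the file name is arbitrary):

# Persist the extracted parameters; np.save pickles the whole dict of arrays.
import numpy as np

np.save('siamrpn_otb_params.npy', params)

# In the consuming script:
loaded = np.load('siamrpn_otb_params.npy', allow_pickle=True).item()
conv1_weights = loaded['conv1/weights']  # (kH, kW, in_channels, out_channels)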