Example #1
def siamese_init(im, target_pos, target_sz, model, hp=None, device='cpu'):
    """
    Initialize the tracker and build the state dict from the target information.
    :param im: current frame being processed
    :param target_pos: target position (center)
    :param target_sz: target size (width, height)
    :param model: trained network model
    :param hp: hyper-parameters
    :param device: device to run on
    :return: the tracker's state dict
    """

    # Initialize the state dict
    state = dict()
    # Record the image height and width
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
    # Configure the tracker parameters
    p = TrackerConfig()
    # Update the parameters from hp and the model's anchor settings
    p.update(hp, model.anchors)
    # Recompute the derived parameters
    p.renew()
    # Get the network model
    net = model
    # Update the tracker parameters from the network, mainly the anchors
    p.scales = model.anchors['scales']
    p.ratios = model.anchors['ratios']
    p.anchor_num = model.anchor_num
    # Generate the anchors
    p.anchor = generate_anchor(model.anchors, p.score_size)
    # Per-channel mean of the image
    avg_chans = np.mean(im, axis=(0, 1))
    # Width, height and side length of the exemplar z, using the configured context ratio
    wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    s_z = round(np.sqrt(wc_z * hc_z))
    # Initialize the tracking target (the exemplar)
    z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z,
                                    avg_chans)
    # Wrap it in a Variable so it can be used by PyTorch
    z = Variable(z_crop.unsqueeze(0))
    # Run the template branch of the network
    net.template(z.to(device))
    # Set up the penalty window
    if p.windowing == 'cosine':
        # Build the cosine window as the outer product of Hanning windows
        window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))
    # Each anchor gets its own copy of the penalty window
    window = np.tile(window.flatten(), p.anchor_num)
    # Store everything in the state dict
    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    return state
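
The crop-size computation above can be checked in isolation. A minimal sketch (not from the source), assuming p.context_amount = 0.5 as noted in the later examples and an illustrative 60x120 target box:

import numpy as np

target_sz = np.array([60.0, 120.0])   # hypothetical target width/height
context_amount = 0.5                  # assumed value of p.context_amount

wc_z = target_sz[0] + context_amount * sum(target_sz)  # 60 + 0.5 * 180 = 150
hc_z = target_sz[1] + context_amount * sum(target_sz)  # 120 + 0.5 * 180 = 210
s_z = round(np.sqrt(wc_z * hc_z))                      # sqrt(150 * 210) ~ 177
print(s_z)  # side length of the square exemplar crop before it is resized to exemplar_size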
Example #2
def siamese_init(im, target_pos, target_sz, model, hp=None, device='cpu'):
    # print("------siamese_init-------")
    state = dict()
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
    # print("im.shape[0] ", im.shape[0])
    p = TrackerConfig()
    p.update(hp, model.anchors)

    p.renew()

    net = model
    p.scales = model.anchors['scales']
    p.ratios = model.anchors['ratios']
    p.anchor_num = model.anchor_num

    p.anchor = generate_anchor(model.anchors, p.score_size)
    avg_chans = np.mean(im, axis=(0, 1))

    wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    s_z = round(np.sqrt(wc_z * hc_z))
    # initialize the exemplar
    z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z,
                                    avg_chans)
    # print("z size (patch) ", z_crop.size())
    z = Variable(z_crop.unsqueeze(0))
    # The network stores the features (self.zf) that result from passing the patch z through the Siamese backbone
    net.template(z.to(device))

    if p.windowing == 'cosine':
        window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))
    window = np.tile(window.flatten(), p.anchor_num)

    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    # print("window = ", state['window'])
    return state
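
As the comment above notes, net.template(z) runs the exemplar through the backbone once and caches the resulting features (self.zf) for later correlation. A toy stand-in (not the SiamMask network) that illustrates this caching pattern:

import torch
import torch.nn as nn

class ToySiamese(nn.Module):
    """Minimal stand-in: one conv layer plays the role of the shared backbone."""
    def __init__(self):
        super().__init__()
        self.backbone = nn.Conv2d(3, 8, kernel_size=3)
        self.zf = None

    def template(self, z):
        # Extract and cache the exemplar features; tracking reuses self.zf on every frame.
        self.zf = self.backbone(z)
        return self.zf

net = ToySiamese()
z = torch.zeros(1, 3, 127, 127)  # shape of the exemplar tensor after unsqueeze(0)
net.template(z)
print(net.zf.shape)              # torch.Size([1, 8, 125, 125])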
Example #3
def siamese_init(im, target_pos, target_sz, model, hp=None, device='cpu'):
    state = dict()
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
    p = TrackerConfig()
    p.update(hp, model.anchors)

    p.renew()

    net = model
    p.scales = model.anchors['scales']
    p.ratios = model.anchors['ratios']
    p.anchor_num = model.anchor_num
    p.anchor = generate_anchor(model.anchors, p.score_size)
    avg_chans = np.mean(im, axis=(0, 1))

    wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    s_z = round(np.sqrt(wc_z * hc_z))
    # initialize the exemplar
    z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z,
                                    avg_chans)

    z = Variable(z_crop.unsqueeze(0))
    net.template(z.to(device))

    if p.windowing == 'cosine':
        window = np.outer(np.hanning(p.score_size),
                          np.hanning(p.score_size))  # the outer product is a matrix, while the inner product is a scalar
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))
    window = np.tile(window.flatten(), p.anchor_num)

    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    return state
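
The penalty window built at the end of each example can be reproduced on its own. A minimal sketch (not from the source), assuming score_size = 25 and anchor_num = 5, the values noted in the comments of example #6:

import numpy as np

score_size = 25  # assumed value of p.score_size
anchor_num = 5   # assumed value of p.anchor_num

# Outer product of two Hanning vectors gives the 2-D cosine window.
window = np.outer(np.hanning(score_size), np.hanning(score_size))
print(window.shape)  # (25, 25)

# One flattened copy of the window per anchor shape.
window = np.tile(window.flatten(), anchor_num)
print(window.shape)  # (3125,) == 5 * 25 * 25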
Example #4
File: test.py  Project: maazullah96/files
def siamese_init(im,
                 model,
                 hp=None,
                 device='cpu',
                 targets=None,
                 detector=None):
    custom_objects = detector.CustomObjects(car=True, person=True)
    state = dict()
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
    p = TrackerConfig()
    p.update(hp, model.anchors)

    p.renew()

    net = model
    p.scales = model.anchors['scales']
    p.ratios = model.anchors['ratios']
    p.anchor_num = model.anchor_num
    p.anchor = generate_anchor(model.anchors, p.score_size)
    avg_chans = np.mean(im, axis=(0, 1))

    # s_z = [ round(np.sqrt(target["target_sz"][1] + 0.123 * sum(target["target_sz"])*target["target_sz"][0] + 0.123 * sum(target["target_sz"]) ))  for target in targets ]
    # s_z = np.array(s_z)
    # print(targe)
    # targets.append(targe)
    # print(targets)
    BLUE = [255, 255, 255]

    for i, target in enumerate(targets):
        wc_z = target["target_sz"][0] + p.context_amount * sum(
            target["target_sz"])
        hc_z = target["target_sz"][1] + p.context_amount * sum(
            target["target_sz"])
        target["s_z"] = round(np.sqrt(wc_z * hc_z))

    print("out")

    # initialize the exemplar
    targets = get_subwindow_tracking(
        im,
        p.exemplar_size,
        avg_chans,
        targets=targets,
    )

    # z_f = [ net.template(Variable(target["im_to_torch"].unsqueeze(0)).to(device))  for target in targets ]

    for i, target in enumerate(targets):
        # detections = detector.detectCustomObjectsFromImage(custom_objects=custom_objects, input_image=target["im_patch"],input_type="array", output_image_path=os.path.join("image {} custom.jpg".format(i)),output_type="file", minimum_percentage_probability=30)
        # detections = detector.detectCustomObjectsFromImage(custom_objects=custom_objects, input_image=target["img"],input_type="array", output_image_path=os.path.join(execution_path , "images.jpg"),output_type="file", minimum_percentage_probability=30)
        z = Variable(target["im_to_torch"].unsqueeze(0))
        target["zf"] = net.template(z.to(device))

        del target["im_to_torch"]
        # for eachObject in detections:
        #     print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] )
        #     target["detection"] = eachObject["box_points"]

        #     print("--------------------------------")

    if p.windowing == 'cosine':
        window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))
    window = np.tile(window.flatten(), p.anchor_num)

    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state["targets"] = targets
    state["detector"] = detector
    # state["s_z"] = s_z
    # state["z_f"] = z_f
    return state
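
This variant replaces the single target_pos/target_sz pair with a list of per-target dicts. A minimal sketch (not from the source) of how such a targets list could be assembled before the per-target crop-size loop; "target_sz" and "s_z" mirror the keys used above, while the boxes, the "target_pos" key and context_amount = 0.5 are assumptions for illustration:

import numpy as np

boxes = [(150, 80, 60, 120), (300, 40, 80, 80)]  # hypothetical (x, y, w, h) boxes

targets = []
for x, y, w, h in boxes:
    targets.append({
        "target_pos": np.array([x + w / 2, y + h / 2]),  # box center (assumed key)
        "target_sz": np.array([w, h]),                   # box width/height
    })

context_amount = 0.5  # assumed value of p.context_amount
for target in targets:
    wc_z = target["target_sz"][0] + context_amount * sum(target["target_sz"])
    hc_z = target["target_sz"][1] + context_amount * sum(target["target_sz"])
    target["s_z"] = round(np.sqrt(wc_z * hc_z))  # square crop size per target

print([t["s_z"] for t in targets])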
Example #5
def siamese_init(im,
                 search_shape,
                 target_pos,
                 target_sz,
                 model,
                 hp=None,
                 device='cpu'):
    """
    generate anchors, run inference on the template image, and set up the window
    :param im: whole image
    :param target_pos: target position that is selected
    :param target_sz: target size that is selected
    :param model: SiamMask model
    :param hp: hyper parameters
    :param device: device to run on
    :return: the tracker's state dict
    """
    state = dict()
    state['im_h'] = search_shape[0]
    state['im_w'] = search_shape[1]
    p = TrackerConfig()
    p.update(hp, model.anchors)

    p.renew()

    net = model
    p.scales = model.anchors['scales']
    p.ratios = model.anchors['ratios']
    p.anchor_num = model.anchor_num
    p.anchor = generate_anchor(
        model.anchors, p.score_size)  # anchor size: (25*25*5, 4) --> (3125, 4)
    avg_chans = np.mean(im, axis=(0, 1))

    # wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    # hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    # s_z = round(np.sqrt(wc_z * hc_z))  # crop size = sqrt((w+(w+h)/2)*(h+(w+h)/2))
    ## initialize the exemplar
    #im_patch = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans, out_mode="numpy")
    im_patch = im
    im_patch = cv2.resize(im_patch, (p.exemplar_size, p.exemplar_size))

    cv2.imshow('crop_template', im_patch)
    cv2.waitKey(0)
    z_crop = im_to_torch(im_patch)

    z = Variable(z_crop.unsqueeze(0))
    net.template(z.to(device))

    if p.windowing == 'cosine':
        window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))
    window = np.tile(window.flatten(), p.anchor_num)

    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    return state
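
Instead of cropping around target_pos, this variant assumes im is already a template patch and simply resizes it to exemplar_size. A minimal sketch (not from the source) of that resize step, together with the HWC-to-CHW conversion that im_to_torch is assumed to perform:

import cv2
import numpy as np
import torch

exemplar_size = 127  # assumed value of p.exemplar_size

# Dummy BGR patch standing in for the pre-cropped template image.
im_patch = np.random.randint(0, 256, (200, 160, 3), dtype=np.uint8)
im_patch = cv2.resize(im_patch, (exemplar_size, exemplar_size))

# Assumed im_to_torch behaviour: HWC uint8 image -> CHW float tensor.
z_crop = torch.from_numpy(im_patch.transpose(2, 0, 1)).float()
z = z_crop.unsqueeze(0)  # add the batch dimension expected by net.template
print(z.shape)           # torch.Size([1, 3, 127, 127])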
Example #6
def siamese_init(im,
                 target_pos,
                 target_sz,
                 model,
                 hp=None,
                 device='cpu'):  # target_pos and target_sz come from the axis-aligned ground-truth box
    state = dict()
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
    p = TrackerConfig()  # configuration parameters
    p.update(hp,
             model.anchors)  # update p from hp and model.anchors, i.e. from config_vot.json
    p.renew()
    #    p.score_size=25
    net = model
    p.scales = model.anchors['scales']  # attributes of SiamMask, the parent class of Custom
    p.ratios = model.anchors['ratios']
    p.anchor_num = model.anchor_num  # 5 on the VOT dataset

    p.anchor = generate_anchor(
        model.anchors, p.score_size
    )  # generate_anchor builds the anchors; p.anchor.shape = (p.anchor_num*p.score_size*p.score_size, 4)
    avg_chans = np.mean(im, axis=(0, 1))  # per-channel mean of this single image, shape (3,) in (B, G, R) order

    # Pre-processing: enlarge the target box by a fixed ratio to include context. p.context_amount = 0.5
    wc_z = target_sz[0] + p.context_amount * sum(
        target_sz)  # wc_z = w + p.context_amount * (w+h)
    hc_z = target_sz[1] + p.context_amount * sum(
        target_sz)  # hc_z = h + p.context_amount * (w+h)
    # Enlarge the selected box by roughly 2x and, centered on the object, crop a square patch with side s_z
    s_z = round(np.sqrt(wc_z *
                        hc_z))  # round() rounds half to even: round(2.5) = 2, round(2.51) = 3
    # initialize the exemplar
    z_crop = get_subwindow_tracking(
        im, target_pos, p.exemplar_size, s_z,
        avg_chans)  # tensor<(3, 127, 127), float32, cpu>
    # p.exemplar_size is defined in TrackerConfig as the input z size, 127
    # z_crop has shape (3, 127, 127)

    z = Variable(
        z_crop.unsqueeze(0)
    )  # unsqueeze adds a batch dimension, giving the network input tensor<(1, 3, 127, 127), float32, cpu>
    net.template(z.to(device))  # move z to the device and extract features, i.e. the output of the ResNet-50 backbone

    if p.windowing == 'cosine':  # default
        window = np.outer(
            np.hanning(p.score_size), np.hanning(p.score_size)
        )  # outer product, ndarray of shape (p.score_size, p.score_size), i.e. <(25, 25), float64>
    elif p.windowing == 'uniform':
        window = np.ones((p.score_size, p.score_size))

    window = np.tile(window.flatten(),
                     p.anchor_num)  # repeat window.flatten() p.anchor_num times
    # ndarray<(3125,), float64>, with p.anchor_num = 5
    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos  # same value passed in
    state['target_sz'] = target_sz  # same value passed in
    return state
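
The comment on the signature says that target_pos and target_sz come from the axis-aligned ground-truth box. A minimal sketch (not from the source) of that conversion, using the (x, y, w, h) box convention followed by the SiamMask demo; the numbers are illustrative:

import numpy as np

x, y, w, h = 150, 80, 60, 120                  # hypothetical axis-aligned box

target_pos = np.array([x + w / 2, y + h / 2])  # box center (cx, cy)
target_sz = np.array([w, h])                   # box width and height

print(target_pos, target_sz)
# state = siamese_init(im, target_pos, target_sz, model, hp=cfg['hp'], device=device)
# (im, model, cfg and device are assumed to already exist in the calling script)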