Code example #1
    def get_posneg_samples(self, imgwh, pos_size, neg_size, use_whole=True, **kwargs):
        pos_thresh = kwargs.get('pos_thresh', ADNetConf.g()['initial_finetune']['pos_thresh'])
        neg_thresh = kwargs.get('neg_thresh', ADNetConf.g()['initial_finetune']['neg_thresh'])

        gaussian_samples = self.gen_noise_samples(imgwh, 'gaussian', pos_size * 2, kwargs=kwargs)
        gaussian_samples = [x for x in gaussian_samples if x.iou(self) > pos_thresh]

        uniform_samples = self.gen_noise_samples(imgwh, 'uniform', neg_size if use_whole else neg_size*2, kwargs=kwargs)
        uniform_samples = [x for x in uniform_samples if x.iou(self) < neg_thresh]

        if use_whole:
            whole_samples = self.gen_noise_samples(imgwh, 'whole', neg_size, kwargs=kwargs)
            whole_samples = [x for x in whole_samples if x.iou(self) < neg_thresh]
        else:
            whole_samples = []

        pos_samples = []
        for _ in range(pos_size):
            # dylan: guard against an empty gaussian_samples list (random.choice would raise)
            if gaussian_samples:
                pos_samples.append(random.choice(gaussian_samples))
                
        neg_candidates = uniform_samples + whole_samples
        neg_samples = []
        for _ in range(neg_size):
            # same guard: neg_candidates may be empty after IoU filtering
            if neg_candidates:
                neg_samples.append(random.choice(neg_candidates))
        return pos_samples, neg_samples
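A hypothetical call site for the sampler above, assuming get_posneg_samples is a method of the project's BoundingBox class; the import paths, the Coordinate wrapper for the image size, and the sample counts below are guesses, not taken from the project:

from boundingbox import BoundingBox, Coordinate    # assumed module layout
from conf.configs import ADNetConf

ADNetConf.get('conf/dylan.yaml')                    # provides the pos_thresh / neg_thresh defaults
gt = BoundingBox(50, 80, 120, 160)                  # x, y, w, h of the annotated target (assumed signature)
imgwh = Coordinate(640, 480)                        # image width/height (assumed type)
pos, neg = gt.get_posneg_samples(imgwh, pos_size=500, neg_size=5000)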
Code example #2
    def __init__(self, db='VOT', path_head='', data_path='dataset/'):

        super(TrackEnv, self).__init__()
        
        # Because multiprocessing starts env processes with 'spawn', the ADNetConf
        # configuration loaded in the parent process is not inherited, so reload it here.
        ADNetConf.get('conf/dylan.yaml')
        self.stop_iou, self.stop_cnt = ADNetConf.get()['dl_paras']['stop_iou_cnt']
        self.sample_zoom = ADNetConf.g()['dl_paras']['zoom_scale']
        self.out_limit = ADNetConf.g()['dl_paras']['actor_out_limt']
        self.len_seq = ADNetConf.g()['dl_paras']['len_seq']
        self.reward_stages = ADNetConf.g()['dl_paras']['reward_stages']

        self.action_space = gym.spaces.Box(low=-1, high=1, shape=(4,), dtype=np.float32)
        ob_shape = (107, 107, 3) if len(self.sample_zoom)==1 else (len(self.sample_zoom),) + (107, 107, 3)
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=ob_shape, dtype=np.uint8)

        self.data_path = data_path
        pkl_path = path_head + ('dataset/vot-otb.pkl' if db == 'VOT' else 'dataset/otb-vot.pkl')
        
        with open(pkl_path, 'rb') as f:
            self.dataset = pickle.load(f)
            
        self.n_seq = len(self.dataset)
        self.seq_names = list(self.dataset)
  
        self.min_len = 40
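The constructor above reads several keys from the 'dl_paras' section of conf/dylan.yaml. A hypothetical sketch of the structure it expects, written as the Python dict that ADNetConf.g()['dl_paras'] would return (all values are illustrative guesses, not the project's real configuration):

dl_paras_example = {
    'stop_iou_cnt': [0.2, 5],    # unpacked into self.stop_iou, self.stop_cnt
    'zoom_scale': [1.0, 1.5],    # one crop per scale -> observation shape (2, 107, 107, 3)
    'actor_out_limt': 0.1,       # key spelled exactly as the code reads it
    'len_seq': 10,
    'reward_stages': [0.5, 0.7],
}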
Code example #3
File: commons.py Project: dinglijay/ImitateTracking
def extract_region(img, bbox):
    xy_center = bbox.xy + bbox.wh * 0.5

    wh = bbox.wh * ADNetConf.get()['predict']['roi_zoom']
    xy = xy_center - wh * 0.5
    xy.x = max(xy.x, 0)
    xy.y = max(xy.y, 0)

    # crop and resize (cast to int so float coordinates do not break numpy slicing)
    crop = img[int(xy.y):int(xy.y + wh.y), int(xy.x):int(xy.x + wh.x), :]
    resize = cv2.resize(crop, (112, 112))
    return resize
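A hypothetical call of extract_region; the config path, frame file, and BoundingBox import path are placeholders, not taken from the project:

import cv2
from commons import extract_region            # file per the attribution above
from boundingbox import BoundingBox           # assumed module layout
from conf.configs import ADNetConf

ADNetConf.get('conf/dylan.yaml')              # supplies the 'predict: roi_zoom' factor
img = cv2.imread('frames/0001.jpg')           # placeholder frame
roi = extract_region(img, BoundingBox(50, 80, 120, 160))   # x, y, w, h (assumed signature)
print(roi.shape)                              # (112, 112, 3)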
Code example #4
def crop_resize(img, bbox, img_size=107, zoom=None):
    if zoom is None:
        zoom = ADNetConf.g()['dl_paras']['zoom_scale']
    
    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)
    if not isinstance(bbox, BoundingBox):
        bbox = BoundingBox(*bbox)
   
    bbox = bbox.fit_image(img.size, margin=10)
    
    patches = []
    for scale in zoom:
        bbox_zoomed = bbox.zoom(scale).to_xyxy2().to_tuple()
        patch = img.crop(bbox_zoomed).resize((img_size, img_size))
        patches.append(np.array(patch))

    return np.squeeze(np.array(patches))
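A hypothetical call of crop_resize with an explicit zoom list, which skips the ADNetConf lookup entirely; the import path and frame file are placeholders:

import numpy as np
from PIL import Image
from commons import crop_resize               # assumed module layout

img = np.array(Image.open('frames/0001.jpg'))             # placeholder frame
patch = crop_resize(img, (50, 80, 120, 160), img_size=107, zoom=[1.0, 1.5])
print(patch.shape)                                         # (2, 107, 107, 3) for two scales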
Code example #5
def main():
    
    from track_policy import TrackPolicy
    from baselines.common import tf_util as U
    
    ADNetConf.get('../../../conf/dylan.yaml')
    env = TrackEnv(db='OTB', path_head='../../../', data_path="../../../dataset/")
    ob = env.reset(startFromFirst=True)
    # ob = env.reset('vot2016/hand', startFromFirst=True)

    actor = TrackPolicy("actor",
                        ob_space=env.observation_space,
                        ac_space=env.action_space,
                        load_path='../../../log/0309_track2CnnFc12_noAct/checkpoints/01100')
    U.initialize()
    
    
    img = Image.open(env.data_path + env.seq_id + r'/' + env.images[0])
        
    fig = plt.figure()
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    im = ax.imshow(img)
    
    gt = env.gts[0].to_tuple()
    gt_rect = plt.Rectangle(tuple(gt[:2]), gt[2], gt[3],
                            linewidth=2, edgecolor="r", zorder=1, fill=False)
    ax.add_patch(gt_rect)
    rect = plt.Rectangle(tuple(gt[:2]), gt[2], gt[3],
                             linewidth=2, edgecolor="g", zorder=1, fill=False)
    ax.add_patch(rect)
    
    plt.pause(.01)
    plt.draw()
    
    ac1, vpred1 = actor.act(stochastic=False, ob=ob)
    ac1 = np.clip(ac1, -1, 1)
    # ac1 = 5.0*np.array(cal_distance(env.gts[0], env.gts[1]))
    #env.render()
    
    reward_sum, cnt = 0, 0
    while True:
        
        im.set_data(env.img)
       
        gt = env.gts[env.pointer].to_tuple()
        gt_rect.set_xy(gt[:2])
        gt_rect.set_width(gt[2]-1)
        gt_rect.set_height(gt[3]-1)

        ob, reward, done, tracker_info = env.step(ac1)
        cnt += 1
            
        result_bb = tracker_info['tracker_post'].to_tuple()
        rect.set_xy(result_bb[:2])  
        rect.set_width(result_bb[2]-1)
        rect.set_height(result_bb[3]-1)
        
        plt.pause(.01)
        plt.draw()
        
        reward_sum += reward

        if done:
            break
        # env.render()
        ac1, vpred1 = actor.act(stochastic=False, ob=ob)
        ac1 = np.clip(ac1, -1, 1)

        # ac1 = 5.0*np.array(cal_distance(tracker_info['tracker_post'], tracker_info['gt']))
#        ac1 = np.array([0.05,0.05,0.00,0.00])
    print(reward_sum / cnt)
Code example #6
import gym
import track_env  # importing this module registers the custom 'track-v0' environment with gym
import numpy as np
from stable_baselines.common.env_checker import check_env
from conf.configs import ADNetConf

ADNetConf.get('conf/dylan.yaml')
env = gym.make('track-v0')
obs = env.reset()
ob_space = env.observation_space

check_env(env)
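check_env runs the environment through stable_baselines' Gym-interface checks: it resets and steps the env with sampled actions and warns or raises if the returned observations do not match observation_space or if reset/step deviate from the Gym API, so it serves as a quick sanity test for the custom spaces defined in TrackEnv.__init__.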