def mot_get_label(tracklets, new_tracklet, param):
    # Look for an existing tracklet that could precede (or follow) the new tracklet
    # and inherit its label when the combined motion/appearance affinity is high enough.
    affinity = []

    # Backward motion model of the new tracklet (independent of the loop below).
    new_Trk = Tracklet()
    new_Trk.FMotion = new_tracklet.FMotion
    XX, PP = mot_motion_model_generation(new_tracklet, param, 'Backward')
    new_Trk.BMotion.X = XX
    new_Trk.BMotion.P = PP
    new_Trk.init_time = new_tracklet.ifr
    new_Trk.end_time = new_tracklet.last_update

    for trk in tracklets:
        temp_Trk = Tracklet()
        temp_Trk.FMotion = trk.FMotion
        XX, PP = mot_motion_model_generation(trk, param, 'Backward')
        temp_Trk.BMotion.X = XX
        temp_Trk.BMotion.P = PP
        temp_Trk.init_time = trk.ifr
        temp_Trk.end_time = trk.last_update

        if trk.last_update < new_tracklet.ifr:
            # trk ends before the new tracklet starts: tail-to-head affinity
            motion_similarity = mot_motion_similarity(temp_Trk, new_Trk, param, "Trk")
            a_model_affinity = cal_a_model_affinity(
                [trk.A_model_tail], [new_tracklet.A_model_head], 0)[0]
            final_affinity = motion_similarity * a_model_affinity
            affinity.append([final_affinity, trk.label])
        if trk.ifr < new_tracklet.last_update:
            # trk starts before the new tracklet ends: head-to-tail affinity
            motion_similarity = mot_motion_similarity(new_Trk, temp_Trk, param, "Trk")
            a_model_affinity = cal_a_model_affinity(
                [trk.A_model_head], [new_tracklet.A_model_tail], 0)[0]
            final_affinity = motion_similarity * a_model_affinity
            affinity.append([final_affinity, trk.label])

    if len(affinity) > 0:
        print("mot_get_label affinity candidates:", affinity)
        affinity = sorted(affinity, key=(lambda x: x[0]), reverse=True)
        max_affinity = affinity[0]
        if max_affinity[0] > param.similar_thresh:
            # inherit the label of the best-matching existing tracklet
            new_tracklet.label = max_affinity[1]
        else:
            new_tracklet.label = Labelling(param)
    else:
        new_tracklet.label = Labelling(param)
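# `cal_a_model_affinity` is not defined in this file. The sketch below is a
# hypothetical reference implementation, assuming the appearance models are
# (Bin*3, subregion) colour-histogram templates and the affinity is the mean
# Bhattacharyya coefficient over subregions (the third argument is ignored).
# It only documents the call shape used by mot_get_label above; the repo's
# real helper may differ.
import numpy as np

def cal_a_model_affinity_sketch(models_a, models_b, _mode=0):
    affinities = []
    for ha, hb in zip(models_a, models_b):
        ha = np.asarray(ha, dtype=float)
        hb = np.asarray(hb, dtype=float)
        # normalise every subregion histogram before comparison
        ha = ha / (ha.sum(axis=0, keepdims=True) + 1e-12)
        hb = hb / (hb.sum(axis=0, keepdims=True) + 1e-12)
        bc = np.sqrt(ha * hb).sum(axis=0)    # Bhattacharyya coefficient per subregion
        affinities.append(float(bc.mean()))  # average over subregions
    return affinities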
def MOT_Global_Association(Trk=None, Obs_grap=None, Obs_info=None, param=None, fr=None, *args, **kwargs):
    ILDA = param.ILDA
    Refer = []
    Test = []
    all_indx = [i for i in range(0, len(Trk))]
    low_indx, _, _ = Idx2Types(Trk, 'Low')
    high_indx = np.setdiff1d(all_indx, low_indx)
    yidx = np.where(Obs_grap[fr].iso_idx == 1)[0]
    yhist = Obs_info.yhist
    ystate = Obs_info.ystate
    ystates_ids = Obs_info.ystates_ids
    High_trk = []
    Low_trk = []
    Y_set = []
    if len(low_indx) != 0:
        # Tracklets with low confidence
        for ii in range(0, len(low_indx)):
            i = low_indx[ii]
            temp_Trk_low = Tracklet()
            temp_Trk_low.hist = Trk[i].A_Model
            temp_Trk_low.FMotion = Trk[i].FMotion
            temp_Trk_low.last_update = Trk[i].last_update
            temp_Trk_low.h = Trk[i].state[-1][3]
            temp_Trk_low.w = Trk[i].state[-1][2]
            temp_Trk_low.type = Trk[i].type
            temp_Trk_low.end_time = Trk[i].efr
            Low_trk.append(temp_Trk_low)
        # Tracklets with high confidence
        for jj in range(0, len(high_indx)):
            j = high_indx[jj]
            temp_Trk_high = Tracklet()
            temp_Trk_high.hist = Trk[j].A_Model
            temp_Trk_high.h = Trk[j].state[-1][3]
            temp_Trk_high.w = Trk[j].state[-1][2]
            temp_Trk_high.FMotion = Trk[j].FMotion
            XX, PP = mot_motion_model_generation(Trk[j], param, 'Backward')
            temp_Trk_high.BMotion.X = XX
            temp_Trk_high.BMotion.P = PP
            temp_Trk_high.last_update = Trk[j].last_update
            temp_Trk_high.init_time = Trk[j].ifr
            High_trk.append(temp_Trk_high)
        iso_label = []
        if len(yidx) != 0:
            # Unassociated detections of the current frame
            for jj in range(0, len(yidx)):
                j = yidx[jj]
                temp_y_set = Z_item()
                temp_y_set.hist = yhist[:, :, j]
                temp_y_set.pos = [ystate[j][0], ystate[j][1]]
                temp_y_set.h = ystate[j][3]
                temp_y_set.w = ystate[j][2]
                Y_set.append(temp_y_set)
                iso_label = j
        thr = param.obs_thr
        # Score low-confidence tracklets against high-confidence tracklets and detections
        score_trk = mot_eval_association_matrix(Low_trk, High_trk, param, 'Trk', ILDA)
        score_obs = mot_eval_association_matrix(Low_trk, Y_set, param, 'Obs', ILDA)
        score_mat = np.concatenate((score_trk, score_obs), axis=1)
        matching, Affinity = mot_association_hungarian(score_mat, thr)
        alpha = param.alpha
        rm_idx = []
        for m in range(0, len(Affinity)):
            mat_idx = matching[1, m]
            if mat_idx < len(high_indx):
                # Matched to a high-confidence tracklet: merge the low-confidence tracklet into it.
                t_idx = low_indx[matching[0, m]]
                y_idx = high_indx[matching[1, m]]
                Trk[y_idx].ifr = Trk[t_idx].ifr
                fr1 = Trk[t_idx].ifr
                fr2 = Trk[t_idx].efr
                for kk in range(fr1, fr2):
                    Trk[y_idx].state[kk] = Trk[t_idx].state[kk]
                numHyp = len(Trk[t_idx].hyp.score)
                for kk in range(fr1, numHyp):
                    Trk[y_idx].hyp.score[kk] = Trk[t_idx].hyp.score[kk]
                    Trk[y_idx].hyp.ystate[kk] = Trk[t_idx].hyp.ystate[kk]
                    Trk[y_idx].hyp.ystates_ids[kk] = Trk[t_idx].hyp.ystates_ids[kk]
                for kk in range(numHyp, fr):
                    Trk[y_idx].hyp.score[kk] = param.init_prob
                    Trk[y_idx].hyp.ystate[kk] = []
                    Trk[y_idx].hyp.ystates_ids[kk] = -1
                Trk[y_idx].A_Model = alpha * Trk[t_idx].A_Model + (1 - alpha) * Trk[y_idx].A_Model
                # Re-run the forward Kalman filter over the merged state history.
                XX = np.zeros(4)
                numState = len(Trk[y_idx].state)
                XX[0] = Trk[y_idx].state[fr1][0]
                XX[2] = Trk[y_idx].state[fr1][1]
                XX[1] = 0
                XX[3] = 0
                PP = param.P
                for ff in range(fr1, numState):
                    tState = Trk[y_idx].state[ff]
                    if len(tState) > 0:
                        XX, PP = km_estimation(XX, tState[0:2], param, PP)
                    else:
                        tState = Trk[y_idx].state[fr2]
                        XX, PP = km_estimation(XX, [], param, PP)
                    Trk[y_idx].state[ff][0:2] = [XX[0], XX[2]]
                    Trk[y_idx].state[ff][2:4] = [tState[2], tState[3]]  # keep width/height from the reference state
                    Trk[y_idx].FMotion.X[:, ff] = XX
                    Trk[y_idx].FMotion.P[:, :, ff] = PP
                Trk[y_idx].label = Trk[t_idx].label
                Trk[y_idx].type = 'High'
                rm_idx.append(t_idx)
            else:
                # Matched to a detection: extend the low-confidence tracklet with it.
                m_idx = matching[1, m] - len(high_indx)
                t_idx = low_indx[matching[0, m]]
                y_idx = yidx[m_idx]
                for i in range(len(Trk[t_idx].hyp.score), fr):
                    Trk[t_idx].hyp.score.append(0)
                    Trk[t_idx].hyp.ystate.append([])
                    Trk[t_idx].hyp.ystates_ids.append(-1)
                Trk[t_idx].hyp.score.append(Affinity[m])
                Trk[t_idx].hyp.ystate.append(ystate[y_idx])
                Trk[t_idx].hyp.ystates_ids.append(ystates_ids[y_idx])
                Trk[t_idx].hyp.new_tmpl = yhist[:, :, y_idx]
                Trk[t_idx].last_update = fr
                Obs_grap[fr].iso_idx[y_idx] = 0
        if len(rm_idx) != 0:
            # drop the merged low-confidence tracklets
            for idx in sorted(rm_idx, reverse=True):
                Trk.pop(idx)
    return Trk, Obs_grap
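# `mot_association_hungarian` is imported from elsewhere in the repo. The sketch
# below is a hypothetical stand-in built on scipy's Hungarian solver, assuming
# higher scores are better and pairs scoring at or below `thr` are discarded.
# It returns `matching` as a 2 x M integer array of (row, col) index pairs and
# `Affinity` as the list of their scores, which is how the callers in this file
# index the result; the real helper may differ in detail.
import numpy as np
from scipy.optimize import linear_sum_assignment

def mot_association_hungarian_sketch(score_mat, thr):
    score_mat = np.atleast_2d(np.asarray(score_mat, dtype=float))
    if score_mat.size == 0:
        return np.empty((2, 0), dtype=int), []
    rows, cols = linear_sum_assignment(-score_mat)  # maximise the total score
    pairs, affinities = [], []
    for r, c in zip(rows, cols):
        if score_mat[r, c] > thr:                   # keep confident pairs only
            pairs.append((r, c))
            affinities.append(score_mat[r, c])
    matching = np.array(pairs, dtype=int).T if pairs else np.empty((2, 0), dtype=int)
    return matching, affinities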
def MOT_Local_Association(Trk=None, detections=None, Obs_grap=None, param=None, fr=None, rgbimg=None, nargout1=None, *args, **kwargs):
    ILDA = param.ILDA
    Z_meas = detections[fr]  # detections of the current frame
    Z_meas = [detection[2:6] for detection in Z_meas]  # keep only the location (x, y, w, h) from the raw data
    ystates_ids = [detection[1] for detection in detections[fr]]
    ystate = Z_meas
    obs_grap = Obs_Graph()
    Obs_grap.append(obs_grap)
    Obs_grap[fr].iso_idx = np.ones((len(detections[fr])))
    obs_info = Obs_info()
    obs_info.ystate = []
    obs_info.yhist = []
    obs_info.ystates_ids = []
    if not np.all(np.asarray(ystate) == 0):
        # appearance model of every detection in the current frame
        yhist = mot_appearance_model_generation(rgbimg, param, ystate, True)
        obs_info.ystate = ystate
        obs_info.yhist = yhist
        obs_info.ystates_ids = ystates_ids
        tidx, _, _ = Idx2Types(Trk, 'High')  # indices of high-confidence tracklets
        yidx = np.where(Obs_grap[fr].iso_idx == 1)[0]  # indices of unassociated detections in the current frame
        if len(tidx) != 0 and len(yidx) != 0:
            Trk_high = []
            Z_set = []
            trk_label = []
            conf_set = []
            # build the list of high-confidence tracklets
            for ii in range(0, len(tidx)):
                i = tidx[ii]
                temp_Trk_high = Tracklet()
                temp_Trk_high.hist = Trk[i].A_Model
                temp_Trk_high.FMotion = Trk[i].FMotion
                temp_Trk_high.last_update = Trk[i].last_update
                temp_Trk_high.h = Trk[i].state[-1][3]
                temp_Trk_high.w = Trk[i].state[-1][2]
                temp_Trk_high.type = Trk[i].type
                Trk_high.append(temp_Trk_high)
                trk_label.append(Trk[i].label)
                conf_set.append(Trk[i].Conf_prob)
            # build the list of detections in the current frame
            meas_label = []
            for jj in range(0, len(yidx)):
                j = yidx[jj]  # index of the jj-th unassociated detection
                # record the detection's x, y, w, h and appearance model
                z_item = Z_item()
                z_item.hist = yhist[:, :, j]
                z_item.pos = [ystate[j][0], ystate[j][1]]
                z_item.h = ystate[j][3]
                z_item.w = ystate[j][2]
                Z_set.append(z_item)
                meas_label.append(j)
            thr = param.obs_thr
            # score matrix between high-confidence tracklets and current detections
            score_mat = mot_eval_association_matrix(Trk_high, Z_set, param, 'Obs', ILDA)
            confidence = []
            for i in Trk:
                confidence.append(i.Conf_prob)
            print("local association score_mat:", score_mat)
            # Hungarian matching; score_mat has shape [len(Trk_high), len(Z_set)]
            matching, __ = mot_association_hungarian(score_mat, thr)
            print("local association matching:", matching)
            if matching.size != 0:
                for i in range(0, matching.shape[1]):
                    ass_idx_row = matching[0, i]
                    ta_idx = tidx[ass_idx_row]
                    ass_idx_col = matching[1, i]
                    ya_idx = yidx[ass_idx_col]
                    ListInsert(Trk[ta_idx].hyp.score, fr, score_mat[matching[0, i], matching[1, i]], 0)
                    ListInsert(Trk[ta_idx].hyp.ystate, fr, ystate[ya_idx], [])
                    ListInsert(Trk[ta_idx].hyp.ystates_ids, fr, ystates_ids[ya_idx], -1)
                    Trk[ta_idx].hyp.new_tmpl = yhist[:, :, ya_idx]
                    Trk[ta_idx].last_update = fr
                    Obs_grap[fr].iso_idx[ya_idx] = 0
    return Trk, Obs_grap, obs_info
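# `ListInsert` is used throughout this repo but not defined in this file. Judging
# from the manual pad-then-append pattern in MOT_Global_Association above, it is
# assumed to pad the list with a filler value up to the target index and then
# store the value there. A minimal sketch under that assumption:
def ListInsert_sketch(lst, idx, value, filler):
    while len(lst) <= idx:
        # copy mutable fillers so the padded slots do not share one object
        lst.append(filler.copy() if isinstance(filler, list) else filler)
    lst[idx] = value
    return lst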
def MOT_Init_Tracklets_Generation(init_Trks, window_tracklets, cfr, param):
    # iterate over a copy so removing exhausted window tracklets is safe
    for tracklet in list(window_tracklets):
        # Once a window tracklet spans 5 frames, cut that 5-frame segment off
        # and turn it into a new init tracklet.
        if cfr - tracklet.ifr + 1 == 5:
            ifr = tracklet.ifr
            new_init_tracklet = Tracklet()
            new_init_tracklet.Conf_prob = tracklet.Conf_prob
            new_init_tracklet.type = tracklet.type
            new_init_tracklet.reliable = tracklet.reliable
            new_init_tracklet.isnew = 1
            new_init_tracklet.sub_img = []
            new_init_tracklet.status = tracklet.status
            new_init_tracklet.label = tracklet.label
            new_init_tracklet.ifr = ifr
            new_init_tracklet.efr = -1
            new_init_tracklet.last_update = cfr
            new_init_tracklet.A_model_list = tracklet.A_model_list[:cfr]
            new_init_tracklet.FMotion.X = tracklet.FMotion.X[:, cfr].copy()
            new_init_tracklet.FMotion.P = tracklet.FMotion.P[:, :, cfr].copy()
            new_init_tracklet.hyp.score = tracklet.hyp.score[:cfr].copy()
            new_init_tracklet.hyp.ystate = tracklet.hyp.ystate[:cfr].copy()
            new_init_tracklet.state = tracklet.state[:cfr].copy()
            # remove the part that has just become the new init tracklet
            fr = cfr
            while 1:
                if len(tracklet.hyp.ystate[fr]) > 0:
                    tracklet.ifr = fr
                    break
                fr += 1
            for i in range(cfr + 1 - 5, cfr + 1):
                tracklet.state[i] = []
                tracklet.A_model_list[i] = None
                tracklet.FMotion.X[:, i] = 0
                tracklet.FMotion.P[:, :, i] = 0
                tracklet.hyp.score[i] = 0
                tracklet.hyp.ystate[i] = []
            mot_appearance_model_update(new_init_tracklet)
            init_Trks.append(new_init_tracklet)
            # If nothing is left of the window tracklet after the cut, delete it
            # from the window tracklet list.
            if checkNone(tracklet) == True:
                window_tracklets.remove(tracklet)
                print("delete 1 window tracklet...")
            print("Init Tracklet Generated: len(Trk)+1")
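# `checkNone` is not defined in this file. From its use above it is assumed to
# report whether a window tracklet has nothing left after its first five frames
# were split off into a new init tracklet. A minimal sketch under that assumption
# (the real check may inspect more fields than `state`):
def checkNone_sketch(tracklet):
    return all(len(s) == 0 for s in tracklet.state)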
def MOT_Global_Association(Trk=None, Tracklets_window=None, param=None, fr=None, Obs_grap_window=None, detections=None, *args, **kwargs):
    obs_grap = Obs_Graph()
    Obs_grap_window.append(obs_grap)
    Obs_grap_window[fr].iso_idx = np.ones((len(detections[fr])))
    ILDA = param.ILDA
    Refer = []
    Test = []
    all_indx = [i for i in range(0, len(Trk))]
    all_indx_window = [i for i in range(0, len(Tracklets_window))]
    low_indx, _, _ = Idx2Types(Tracklets_window, 'Low')
    high_indx = np.setdiff1d(all_indx_window, low_indx)
    init_indx = all_indx
    init_trk = []
    High_trk = []
    Low_trk = []
    window_trk = []
    Y_set = []
    if len(high_indx) != 0:
        # Tracklets with low confidence in the window
        for ii in range(0, len(low_indx)):
            i = low_indx[ii]
            temp_Trk_low = Tracklet()
            temp_Trk_low.hist = Tracklets_window[i].A_Model
            temp_Trk_low.FMotion = Tracklets_window[i].FMotion
            temp_Trk_low.last_update = Tracklets_window[i].last_update
            temp_Trk_low.h = Tracklets_window[i].state[-1][3]
            temp_Trk_low.w = Tracklets_window[i].state[-1][2]
            temp_Trk_low.type = Tracklets_window[i].type
            temp_Trk_low.end_time = Tracklets_window[i].efr
            Low_trk.append(temp_Trk_low)
        # Tracklets with high confidence in the window
        for jj in range(0, len(all_indx_window)):
            j = all_indx_window[jj]
            temp_Trk_high = Tracklet()
            temp_Trk_high.hist = Tracklets_window[j].A_model_tail
            print("window tracklet", j, "has", len(Tracklets_window[j].state), "state entries")
            temp_Trk_high.h = Tracklets_window[j].state[-1][3]
            temp_Trk_high.w = Tracklets_window[j].state[-1][2]
            temp_Trk_high.FMotion = Tracklets_window[j].FMotion
            XX, PP = mot_motion_model_generation(Tracklets_window[j], param, 'Backward')
            temp_Trk_high.BMotion.X = XX
            temp_Trk_high.BMotion.P = PP
            temp_Trk_high.last_update = Tracklets_window[j].last_update
            temp_Trk_high.init_time = Tracklets_window[j].ifr
            temp_Trk_high.end_time = Tracklets_window[j].efr
            window_trk.append(temp_Trk_high)
        iso_label = []
        # Existing init tracklets
        for jj in range(0, len(all_indx)):
            j = all_indx[jj]
            temp_init_trk = Tracklet()
            temp_init_trk.hist = Trk[j].A_model_tail
            temp_init_trk.h = Trk[j].state[-1][3]
            temp_init_trk.w = Trk[j].state[-1][2]
            temp_init_trk.FMotion = Trk[j].FMotion
            XX, PP = mot_motion_model_generation(Trk[j], param, 'Backward')
            temp_init_trk.BMotion.X = XX
            temp_init_trk.BMotion.P = PP
            temp_init_trk.last_update = Trk[j].last_update
            temp_init_trk.init_time = Trk[j].ifr
            temp_init_trk.end_time = Trk[j].last_update
            init_trk.append(temp_init_trk)
        thr = param.obs_thr
        score_trk = mot_eval_association_matrix(init_trk, window_trk, param, 'Trk', ILDA)
        print("score_trk matrix:", score_trk)
        matching, Affinity = mot_association_hungarian(score_trk, thr)
        alpha = param.alpha
        rm_idx = []
        print("matching matrix:", matching)
        # associate the window tracklets with the init tracklets
        for m in range(0, len(Affinity)):
            w_idx = all_indx_window[matching[0, m]]
            init_idx = all_indx[matching[1, m]]
            fr1 = Tracklets_window[w_idx].ifr
            fr2 = Tracklets_window[w_idx].last_update
            for kk in range(fr1, fr2):
                ListInsert(Trk[init_idx].state, kk, Tracklets_window[w_idx].state[kk], [])
            numHyp = len(Tracklets_window[w_idx].hyp.score)
            # copy hyp information from the window tracklet to the init tracklet
            for kk in range(fr1, numHyp):
                mot_tracklets_concat(Trk[init_idx], Tracklets_window[w_idx], kk, param)
            XX = np.zeros(4)
            numState = len(Trk[init_idx].state)
            XX[0] = Trk[init_idx].state[fr1][0]
            XX[2] = Trk[init_idx].state[fr1][1]
            PP = param.P
            Trk[init_idx].type = 'High'
            mot_appearance_model_update(Trk[init_idx])
            rm_idx.append(w_idx)
        if len(rm_idx) != 0:
            print("removed tracklet ids: {}".format(rm_idx))
            for idx in sorted(rm_idx, reverse=True):
                # mot_count_ids(Tracklets_window[idx], param)
                Tracklets_window.pop(idx)
    return Trk
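# `mot_tracklets_concat` is called per frame while merging a window tracklet into
# an init tracklet, but is not defined in this file. A hypothetical sketch,
# assuming it copies the frame-kk hypothesis entries from the source tracklet
# into the destination with the same padding defaults used elsewhere (0 / [] / -1);
# the real helper may also merge appearance or confidence information.
def mot_tracklets_concat_sketch(dst_trk, src_trk, kk, param):
    ListInsert(dst_trk.hyp.score, kk, src_trk.hyp.score[kk], 0)
    ListInsert(dst_trk.hyp.ystate, kk, src_trk.hyp.ystate[kk], [])
    ListInsert(dst_trk.hyp.ystates_ids, kk, src_trk.hyp.ystates_ids[kk], -1)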
def MOT_Window_Association(Tracklets_window=None, param=None, fr=None, detections=None, Obs_grap_window=None, *args, **kwargs):
    # head/tail index lists of the window tracklets, to reduce the amount of matching
    head_index, tail_index = getHeadAndTailTrk(Tracklets_window)
    ILDA = param.ILDA
    all_indx_window = [i for i in range(0, len(Tracklets_window))]
    window_trk_head = []
    window_trk_tail = []
    High_trk = []
    Low_trk = []
    Y_set = []
    # Window tracklets described by their tail appearance model (link candidates going forward)
    for jj in range(0, len(head_index)):
        j = head_index[jj]
        temp_Trk = Tracklet()
        temp_Trk.hist = Tracklets_window[j].A_model_tail
        temp_Trk.h = Tracklets_window[j].state[-1][3]
        temp_Trk.w = Tracklets_window[j].state[-1][2]
        temp_Trk.FMotion = Tracklets_window[j].FMotion
        XX, PP = mot_motion_model_generation(Tracklets_window[j], param, 'Forward')
        temp_Trk.BMotion.X = XX
        temp_Trk.BMotion.P = PP
        temp_Trk.last_update = Tracklets_window[j].last_update
        temp_Trk.init_time = Tracklets_window[j].ifr
        temp_Trk.end_time = Tracklets_window[j].last_update
        window_trk_head.append(temp_Trk)
    iso_label = []
    # Window tracklets described by their head appearance model (link candidates going backward)
    for jj in range(0, len(tail_index)):
        j = tail_index[jj]
        temp_Tracklets_window = Tracklet()
        temp_Tracklets_window.hist = Tracklets_window[j].A_model_head
        temp_Tracklets_window.h = Tracklets_window[j].state[Tracklets_window[j].ifr][3]
        temp_Tracklets_window.w = Tracklets_window[j].state[Tracklets_window[j].ifr][2]
        temp_Tracklets_window.FMotion = Tracklets_window[j].FMotion
        XX, PP = mot_motion_model_generation(Tracklets_window[j], param, 'Backward')
        temp_Tracklets_window.BMotion.X = XX
        temp_Tracklets_window.BMotion.P = PP
        temp_Tracklets_window.last_update = Tracklets_window[j].last_update
        temp_Tracklets_window.init_time = Tracklets_window[j].ifr
        temp_Tracklets_window.end_time = Tracklets_window[j].last_update
        window_trk_tail.append(temp_Tracklets_window)
    thr = param.obs_thr
    score_trk = mot_eval_association_matrix(window_trk_head, window_trk_tail, param, 'Trk', ILDA)
    print("score_trk matrix:", score_trk)
    matching, Affinity = mot_association_hungarian(score_trk, thr)
    alpha = param.alpha
    rm_idx = []
    print("matching matrix:", matching)
    # concatenate matched head/tail tracklet pairs inside the window
    for m in range(0, len(Affinity)):
        head_idx = head_index[matching[0, m]]
        tail_idx = tail_index[matching[1, m]]
        print("concat the tracklets whose ids are {} and {}".format(head_idx, tail_idx), "----" * 30)
        fr1 = Tracklets_window[tail_idx].ifr
        fr2 = Tracklets_window[tail_idx].last_update
        for kk in range(fr1, fr2):
            ListInsert(Tracklets_window[head_idx].state, kk, Tracklets_window[tail_idx].state[kk], [])
        numHyp = len(Tracklets_window[tail_idx].hyp.score)
        # copy hyp information from the tail tracklet to the head tracklet
        for kk in range(fr1, numHyp):
            mot_tracklets_concat(Tracklets_window[head_idx], Tracklets_window[tail_idx], kk, param)
        mot_check_idsw(Tracklets_window[head_idx])
        XX = np.zeros(4)
        numState = len(Tracklets_window[head_idx].state)
        XX[0] = Tracklets_window[head_idx].state[fr1][0]
        XX[2] = Tracklets_window[head_idx].state[fr1][1]
        PP = param.P
        Tracklets_window[head_idx].type = 'High'
        mot_appearance_model_update(Tracklets_window[head_idx], param)
        rm_idx.append(tail_idx)
    if len(rm_idx) != 0:
        print("removed tracklet ids: {}".format(rm_idx))
        for idx in sorted(rm_idx, reverse=True):
            Tracklets_window.pop(idx)
    return Tracklets_window, Obs_grap_window
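# `getHeadAndTailTrk` is not defined in this file. From how its output is used
# above, it must return two index lists over the window tracklets: `head_index`
# for tracklets whose tail could be extended forward, and `tail_index` for
# tracklets whose head could be linked backward onto an earlier tail. The
# pruning rule below is only a guess; the real helper may filter differently.
def getHeadAndTailTrk_sketch(tracklets_window):
    if len(tracklets_window) == 0:
        return [], []
    newest = max(t.last_update for t in tracklets_window)
    oldest = min(t.ifr for t in tracklets_window)
    # candidates that end before the newest frame may gain a continuation
    head_index = [i for i, t in enumerate(tracklets_window) if t.last_update < newest]
    # candidates that start after the oldest frame may be a continuation
    tail_index = [i for i, t in enumerate(tracklets_window) if t.ifr > oldest]
    return head_index, tail_index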
def mot_tracklets_components_setup(img=None, Trk=None, detections=None, cfr=None, ass_idx=None, param=None, tmp_label=None, Obs_grap=None, *args, **kwargs):
    # ass_idx: per-frame detection indices assigned to this tracklet (-1 means no assignment)
    nofa = len(np.where(np.array(ass_idx) != -1)[0])
    # record the detection ids that refer to this object
    ystate_id_list = [-1] * cfr
    tracklet = Tracklet()  # generate a new tracklet
    tracklet.Conf_prob = param.init_prob
    tracklet.type = 'High'
    tracklet.reliable = 'False'
    tracklet.isnew = 1
    tracklet.sub_img = []
    tracklet.status = 'none'
    tracklet.hyp.score = []
    tracklet.ifr = cfr - nofa
    tracklet.efr = 0
    tracklet.last_update = cfr - 1
    tracklet.window_end = cfr - 1
    Acc_tmpl = np.zeros(((param.Bin * 3), param.subregion))
    A_Model_list = [None] * (cfr - nofa)
    # walk back from the current frame over the assigned detections of the previous frames
    for i in range(0, nofa):
        tmp_idx = cfr - i - 1
        temp_state = np.array(detections[tmp_idx][ass_idx[tmp_idx]][2:6].copy())
        temp_affinity = Obs_grap[tmp_idx].child_A_Model_affinity[ass_idx[tmp_idx]]
        ystate_id_list.append(detections[tmp_idx][ass_idx[tmp_idx]][1])
        ListInsert(tracklet.hyp.score, tmp_idx, temp_affinity, 0)
        ListInsert(tracklet.state, tmp_idx, temp_state, [])
        ListInsert(tracklet.detections_id_list, tmp_idx, ass_idx[tmp_idx], None)
        tmpl = mot_appearance_model_generation(img[tmp_idx], param, temp_state, False).squeeze()
        # accumulate the per-frame appearance templates for the averaged model below
        Acc_tmpl = Acc_tmpl + tmpl.reshape(Acc_tmpl.shape)
        A_Model_list.append(tmpl)
    tracklet.A_model_list = A_Model_list
    ystate_id_list.reverse()
    # Appearance model
    tracklet.A_Model = Acc_tmpl / nofa
    # Forward motion model: XX has shape [4, frames]
    XX, PP = mot_motion_model_generation(tracklet, param, 'Forward')
    lt = XX.shape[1]
    tracklet.FMotion.X = np.zeros((4, cfr))
    tracklet.FMotion.P = np.zeros((4, 4, cfr))
    tracklet.FMotion.X[:, cfr - lt:] = XX
    tracklet.FMotion.P[:, :, cfr - lt:] = PP
    tracklet.BMotion.X = []
    tracklet.BMotion.P = []
    tracklet.hyp.ystate = [item for item in tracklet.state]
    tracklet.hyp.ystates_ids = ystate_id_list
    mot_check_idsw(tracklet)
    mot_get_label(Trk, tracklet, param)
    Trk.append(tracklet)
    # count the total number of tracklets
    param.total_tracklet_count += 1
    if (tracklet.window_end - tracklet.ifr + 1) > 5:
        mot_tracklet_confidence_update(tracklet, param, param.lambda_)
    mot_appearance_model_update(tracklet, param)
    # mark the used detections as -1 in iso_idx and child
    used_idx = []
    nT = len(np.where(np.array(ass_idx) != -1)[0])
    for hh in range(0, nT):
        h = -nT + hh
        used_idx.append(ass_idx[h])
    for i in range(0, len(used_idx)):
        iden = used_idx[i]
        Obs_grap[cfr + i - nT].iso_idx[iden] = -1
        Obs_grap[cfr + i - nT].child[iden] = -1
    print("Window Tracklet Generated: len(Trk_Window)+1", ass_idx[-nofa - 2:])
    return Trk, param
def mot_tracklets_components_setup(img=None, Trk=None, detections=None, cfr=None, y_idx=None, param=None, tmp_label=None, initTracklet=False, *args, **kwargs):
    # y_idx: per-frame detection indices assigned to this tracklet (-1 means no assignment)
    ass_idx = y_idx
    nofa = len(np.where(np.array(y_idx) != -1)[0])
    # record the detection ids that refer to this object
    ystate_id_list = []
    tracklet = Tracklet()  # generate a new tracklet
    tracklet.Conf_prob = param.init_prob
    tracklet.type = 'High'
    tracklet.reliable = 'False'
    tracklet.isnew = 1
    tracklet.sub_img = []
    tracklet.status = 'none'
    if tmp_label is not None:
        tracklet.label = tmp_label
    else:
        param, idx = Labelling(param)
        tracklet.label = idx
    tracklet.ifr = cfr - nofa
    tracklet.efr = 0
    tracklet.last_update = cfr - 1
    Acc_tmpl = np.zeros(((param.Bin * 3), param.subregion))
    # walk back from the current frame over the assigned detections of the previous frames
    for i in range(0, nofa):
        tmp_idx = cfr - i - 1
        temp_state = np.array(detections[tmp_idx][ass_idx[tmp_idx][0]][2:6].copy())
        ystate_id_list.append(detections[tmp_idx][ass_idx[tmp_idx][0]][1])
        ListInsert(tracklet.state, tmp_idx, temp_state, [])
        tmpl = mot_appearance_model_generation(img[tmp_idx], param, temp_state, False)
        Acc_tmpl = Acc_tmpl + tmpl.squeeze()[:, np.newaxis]
    ystate_id_list.reverse()
    # Appearance model
    tracklet.A_Model = Acc_tmpl / nofa
    # Forward motion model: XX has shape [4, frames]
    XX, PP = mot_motion_model_generation(tracklet, param, 'Forward')
    lt = XX.shape[1]
    tracklet.FMotion.X = np.zeros((4, cfr))
    tracklet.FMotion.P = np.zeros((4, 4, cfr))
    tracklet.FMotion.X[:, cfr - lt:] = XX
    tracklet.FMotion.P[:, :, cfr - lt:] = PP
    tracklet.BMotion.X = []
    tracklet.BMotion.P = []
    tracklet.hyp.score = [0] * cfr
    tracklet.hyp.ystate = [item for item in tracklet.state]
    tracklet.hyp.ystates_ids = ystate_id_list
    Trk.append(tracklet)
    # count the total number of tracklets
    param.total_tracklet_count += 1
    return Trk, param
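# None of the container classes used above (Tracklet, Z_item, Obs_info, Obs_Graph)
# are defined in this file. The sketches below only list the fields that the
# functions in this file actually touch, with guessed defaults, so the code can
# be read in isolation; the real classes may carry more state.
import numpy as np

class MotionSketch:
    # Kalman motion model: X is 4 x T states, P is 4 x 4 x T covariances
    def __init__(self):
        self.X = []
        self.P = []

class HypothesisSketch:
    # per-frame hypothesis storage filled by the association routines
    def __init__(self):
        self.score = []
        self.ystate = []
        self.ystates_ids = []
        self.new_tmpl = None

class TrackletSketch:
    def __init__(self):
        self.label = -1
        self.type = 'High'             # 'High' or 'Low' confidence
        self.Conf_prob = 0.0
        self.reliable = 'False'
        self.isnew = 0
        self.status = 'none'
        self.sub_img = []
        self.ifr = 0                   # first frame
        self.efr = 0                   # end frame
        self.last_update = 0
        self.window_end = 0
        self.state = []                # per-frame [x, y, w, h]
        self.detections_id_list = []
        self.A_Model = None            # accumulated appearance histogram
        self.A_model_list = []
        self.A_model_head = None
        self.A_model_tail = None
        self.hist = None               # appearance histogram used for matching
        self.h = 0
        self.w = 0
        self.init_time = 0
        self.end_time = 0
        self.FMotion = MotionSketch()  # forward motion model
        self.BMotion = MotionSketch()  # backward motion model
        self.hyp = HypothesisSketch()

class Z_itemSketch:
    # one detection: appearance histogram plus box geometry
    def __init__(self):
        self.hist = None
        self.pos = [0, 0]
        self.w = 0
        self.h = 0

class Obs_infoSketch:
    # per-frame observation bundle handed from local to global association
    def __init__(self):
        self.ystate = []
        self.yhist = []
        self.ystates_ids = []

class Obs_GraphSketch:
    # per-frame observation graph node; iso_idx flags still-unassociated detections
    def __init__(self):
        self.iso_idx = np.array([])
        self.child = []
        self.child_A_Model_affinity = []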