# Example 1
def ssc_main(dataSource, VideoIndex):
    import DataPathclass
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource, VideoIndex)
    import parameterClass
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource, VideoIndex)

    clsObj = trjClusteringFromAdj()

    existingFiles = sorted(glob.glob(DataPathobj.sscpath + '*.mat'))
    existingFileNames = []
    for jj in range(len(existingFiles)):
        existingFileNames.append(int(existingFiles[jj][-7:-4]))

    for matidx in range(len(clsObj.adjmatfiles)):
        if (matidx + 1) in existingFileNames:
            print "alredy processed ", str(matidx + 1)
            continue
        else:
            print "clustering trj based on adj truncation ", matidx
            clsObj.trjclustering(matidx)

            if isSave:
                clsObj.saveLabel(matidx)
            if isVisualize:
                clsObj.visLabel(matidx)
def trjcluster_func_SBS_main(dataSource,VideoIndex):
    import DataPathclass 
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource,VideoIndex)
    import parameterClass 
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource,VideoIndex)

    adjObj = adjacencyMatrix()


    existingFiles = sorted(glob.glob(DataPathobj.adjpath+'*.mat'))
    existingFileNames = []
    for jj in range(len(existingFiles)):
        existingFileNames.append(int(existingFiles[jj][-7:-4]))
    
    # pdb.set_trace()
    for matidx in range(len(adjObj.matfiles)):
        if (matidx+1)  in existingFileNames:
            print "alredy processed ", str(matidx+1)
            continue

        print "building adj mtx ....", matidx
        adjObj.prepare_input_data(matidx)
        """First cluster using just direction Information"""
        adjObj.directionGroup()
        adjObj.adjConstruct(matidx)


        print "saving adj..."
        adjObj.saveADJ(matidx)
        """ visualization, see if connected components make sense"""
# Example 3
def dic_main(dataSource, VideoIndex):
    import DataPathclass
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource, VideoIndex)
    import parameterClass
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource, VideoIndex)
    global subSampRate
    # subSampRate = int(np.round(DataPathobj.cap.get(cv2.cv.CV_CAP_PROP_FPS)/Parameterobj.targetFPS))
    subSampRate = int(30.0 / Parameterobj.targetFPS)

    existingFiles = sorted(glob.glob(DataPathobj.dicpath + '*final_vcxtrj*.p'))
    existingFileNames = []
    for jj in range(len(existingFiles)):
        existingFileNames.append(int(
            existingFiles[jj][-5:-2]))  # files starts from 0

    if len(existingFileNames) > 0:
        print "alredy processed from 0 to ", str(max(existingFileNames))
        start_frame_idx = max(existingFileNames) * 3600 + 3600
        # print "processing", str(15)
        # start_frame_idx = (15-1)*3600+3600
    else:
        start_frame_idx = 0
    print "start_frame_idx: ", start_frame_idx

    trj2dicObj = trj2dic()
    VCobj = VirtualCenter()
    videoReadingObj = videoReading(DataPathobj.video, subSampRate)
    trj2dicObj.get_XYT_inDic(start_frame_idx, videoReadingObj, VCobj)
# Example 4
def trjcluster_func_SBS_main(dataSource, VideoIndex):
    import DataPathclass
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource, VideoIndex)
    import parameterClass
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource, VideoIndex)

    adjObj = adjacencyMatrix()

    existingFiles = sorted(glob.glob(DataPathobj.adjpath + '*.mat'))
    existingFileNames = []
    for jj in range(len(existingFiles)):
        existingFileNames.append(int(existingFiles[jj][-7:-4]))

    # pdb.set_trace()
    for matidx in range(len(adjObj.matfiles)):
        if (matidx + 1) in existingFileNames:
            print "alredy processed ", str(matidx + 1)
            continue

        print "building adj mtx ....", matidx
        adjObj.prepare_input_data(matidx)
        """First cluster using just direction Information"""
        adjObj.directionGroup()
        adjObj.adjConstruct(matidx)

        print "saving adj..."
        adjObj.saveADJ(matidx)
        """ visualization, see if connected components make sense"""
# Example 5
def fit_extrapolate_main(dataSource,VideoIndex):
	"""Smooth and filter raw KLT trajectories, one .mat truncation at a time.

	For each klt .mat file, builds spatially and temporally smoothed
	position and speed matrices, drops all-zero rows, filters out bad
	trajectories, and saves the result to the smooth path.  Truncations
	whose smoothed output already exists are skipped.

	Side effects: rebinds the module globals DataPathobj and Parameterobj.
	"""
	# define start and end regions
	#Canal video's dimensions:
	# """(528, 704, 3)
	# start :<=100,
	# end: >=500,"""

	import DataPathclass 
	global DataPathobj
	DataPathobj = DataPathclass.DataPath(dataSource,VideoIndex)
	import parameterClass 
	global Parameterobj
	Parameterobj = parameterClass.parameter(dataSource,VideoIndex)

	# NOTE(review): start_Y/end_Y are not referenced later in this function.
	start_Y = 100;
	end_Y   = 500;
	
	# matfilepath    = '/Users/Chenge/Desktop/testklt/'
	matfilepath = DataPathobj.kltpath
	matfiles       = sorted(glob.glob(matfilepath + '*.mat'))
	# matfiles       = sorted(glob.glob(matfilepath + 'klt_*.mat'))
	# matfiles       = sorted(glob.glob(matfilepath + 'sim*.mat'))
	start_position =  0
	matfiles       = matfiles[start_position:]

	# Truncation numbers already processed, parsed from the 3-digit
	# suffix of each saved file name (e.g. "...007.mat" -> 7).
	existingFiles = sorted(glob.glob(DataPathobj.smoothpath+'*.mat'))
	existingFileNames = []
	for jj in range(len(existingFiles)):
		existingFileNames.append(int(existingFiles[jj][-7:-4]))
	
	# for matidx,matfile in enumerate(matfiles):
	for matidx in range(len(matfiles)):
		if (matidx+1)  in existingFileNames:
			print "alredy processed ", str(matidx+1)
			continue
		matfile = matfiles[matidx]
		# "if consecutive points are similar to each other, merge them, using one to represent"
		# didn't do this, smooth and resample instead
		print "reading data", matfile
		x,y,t,ptstrj = readData(matfile)
		print "get spatial and temporal smooth matrix"
		x_spatial_smooth_mtx,y_spatial_smooth_mtx,x_time_smooth_mtx,y_time_smooth_mtx, xspd_smooth_mtx,yspd_smooth_mtx = getSmoothMtx(x,y,t)
		"""delete all-zero rows"""
		# Keep only rows with at least one nonzero x sample; the trailing
		# [0,:,:] drops the extra leading axis that indexing with the
		# np.where tuple introduces here.
		good_index_before_filtering = np.where(np.sum(x_spatial_smooth_mtx,1)!=0)
		x_spatial_smooth_mtx = x_spatial_smooth_mtx[good_index_before_filtering,:][0,:,:]
		y_spatial_smooth_mtx = y_spatial_smooth_mtx[good_index_before_filtering,:][0,:,:]
		x_time_smooth_mtx    = x_time_smooth_mtx[good_index_before_filtering,:][0,:,:]
		y_time_smooth_mtx    = y_time_smooth_mtx[good_index_before_filtering,:][0,:,:]
		xspd_smooth_mtx      = xspd_smooth_mtx[good_index_before_filtering,:][0,:,:]
		yspd_smooth_mtx      = yspd_smooth_mtx[good_index_before_filtering,:][0,:,:]
		t = t[good_index_before_filtering,:][0,:,:]
		# plotTrj(x_smooth_mtx,y_smooth_mtx)
		
		print "filtering out bad trajectories"
		goodTrj = filtering(x_spatial_smooth_mtx,y_spatial_smooth_mtx,xspd_smooth_mtx,yspd_smooth_mtx,t)

		# kmeansPolyCoeff(p3)
		# plotTrj(x_spatial_smooth_mtx,y_spatial_smooth_mtx,t,Trjchoice = goodTrj)
		print "saving=======!!"
		saveSmoothMat(x_time_smooth_mtx,y_time_smooth_mtx,xspd_smooth_mtx,yspd_smooth_mtx,goodTrj,ptstrj,matfile)
def ssc_main(dataSource,VideoIndex):
    import DataPathclass 
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource,VideoIndex)
    import parameterClass 
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource,VideoIndex)

    clsObj = trjClusteringFromAdj()


    existingFiles = sorted(glob.glob(DataPathobj.sscpath+'*.mat'))
    existingFileNames = []
    for jj in range(len(existingFiles)):
        existingFileNames.append(int(existingFiles[jj][-7:-4]))
    
    for matidx in range(len(clsObj.adjmatfiles)):
        if (matidx+1)  in existingFileNames:
            print "alredy processed ", str(matidx+1)
            continue
        else:   
            print "clustering trj based on adj truncation ", matidx
            clsObj.trjclustering(matidx)

            if isSave:
                clsObj.saveLabel(matidx)
            if isVisualize:
                clsObj.visLabel(matidx)
def unify_main(dataSource,VideoIndex):
    import DataPathclass 
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource,VideoIndex)
    import parameterClass 
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource,VideoIndex)

    global useCC
    useCC = False
    if Parameterobj.useWarpped:
        filePrefix = 'usewarpped'
    else:
        # filePrefix = 'Aug12'
        # filePrefix = 'Aug10'
        filePrefix = 'Aug15'

    label_choice = Parameterobj.clustering_choice
    if useCC:
        matfilesAll = sorted(glob.glob(DataPathobj.adjpath +filePrefix+'*.mat')) 
    else:
        matfilesAll = sorted(glob.glob(DataPathobj.sscpath +filePrefix+'*.mat'))

 
    numTrunc = len(matfilesAll)
    savename = ''
    if numTrunc<=200:
        if useCC:
            savename = 'concomp'+savename
        else:
            savename = 'Complete_result'+savename
        if Parameterobj.useWarpped:
            savename = 'usewarpped_'+savename

        unify_label(matfilesAll,savename,label_choice)
    else:
        for kk in range(0,numTrunc,25):
            print "saved trunk",str(kk+1).zfill(3),'to' ,str(min(kk+25,numTrunc)).zfill(3)
            matfiles = matfilesAll[kk:min(kk+25,numTrunc)]
            savename = os.path.join(DataPathobj.unifiedLabelpath,'result_'+label_choice+str(kk+1).zfill(3)+'-'+str(min(kk+25,numTrunc)).zfill(3))
            unify_label(matfiles,savename,label_choice)
# Example 8
def pair_main(dataSource, VideoIndex,folderName):
	"""Build co-occurrence pairs of clustered trajectories and write CSVs.

	Loads per-cluster virtual-center trajectories, removes trajectories
	flagged as bad, rebuilds the trajectory object from the cleaned data
	and pickles it, then compares each trajectory against up to 500
	following ones, delegating the pair test / CSV writing / drawing to
	pair_givenID_vis.

	Side effects: rebinds module globals DataPathobj, Parameterobj,
	writerCooccur, writer2 (and cap/im when visualizing).  Relies on
	module-level flags isVisualize, isWrite and isAfterWarpping.
	"""
	import DataPathclass 
	global DataPathobj
	DataPathobj = DataPathclass.DataPath(dataSource,VideoIndex)
	import parameterClass 
	global Parameterobj
	Parameterobj = parameterClass.parameter(dataSource,VideoIndex)

	# fps = 5
	# subSampRate = 6
	fps = Parameterobj.targetFPS
	# cap = cv2.VideoCapture(DataPathobj.video)
	# subSampRate = int(np.round(cap.get(cv2.cv.CV_CAP_PROP_FPS)))/Parameterobj.targetFPS
	# Assumes a 30 fps source video (cv2 probe above disabled) — TODO confirm.
	subSampRate = int(30)/Parameterobj.targetFPS
	# A pair must share at least this many frames (1 second) to count.
	overlap_pair_threshold = 1*fps
	# if len(glob.glob(os.path.join(DataPathobj.pairpath,'obj_pair2.p')))!=0 and len(glob.glob(os.path.join(DataPathobj.pairpath,'all_final_clusterSize.p')))!=0:
	# 	print "already have obj_pair, loading...!"
	# 	obj_pair2 = pickle.load(open(os.path.join(DataPathobj.pairpath,'obj_pair2.p'),'rb'))
	# 	test_clusterSize = pickle.load( open( os.path.join(DataPathobj.pairpath,'all_final_clusterSize.p'), "rb" ) )
	# else:
	test_vctime,test_vcxtrj,test_vcytrj,test_clusterSize,image_list = prepare_data(isAfterWarpping,dataSource,folderName)
	obj_pair = Trj_class_and_func_definitions.TrjObj(test_vcxtrj,test_vcytrj,test_vctime,subSampRate = subSampRate)
	# Trajectory IDs flagged as bad by the first pass; bad_IDs4 deliberately excluded.
	# badkeys  = obj_pair.bad_IDs1+obj_pair.bad_IDs2+obj_pair.bad_IDs4
	badkeys  = obj_pair.bad_IDs1+obj_pair.bad_IDs2

	for key in badkeys:
		del test_vctime[key]
		del test_vcxtrj[key]
		del test_vcytrj[key]
	clean_vctime = test_vctime
	clean_vcxtrj = test_vcxtrj
	clean_vcytrj = test_vcytrj
	print "trj remaining: ", str(len(clean_vctime))
	# rebuild this object using filtered data, should be no bad_IDs
	obj_pair2 = Trj_class_and_func_definitions.TrjObj(clean_vcxtrj,clean_vcytrj,clean_vctime,subSampRate = subSampRate)
	pickle.dump(obj_pair2,open(os.path.join(DataPathobj.pairpath,'obj_pair2.p'),'wb'))
	pickle.dump(test_clusterSize, open( os.path.join(DataPathobj.pairpath,'all_final_clusterSize.p'), "wb" ) )

	""" write clustered Trj infor(not-pairing): clusterID, virtual center X, vc Y, Y direction, X direction """
	# obj2write = obj_pair2
	# savename  = os.path.join(DataPathobj.pairpath,'Trj_with_ID_frm.csv')
	# writer    = csv.writer(open(savename,'wb'))
	# writer.writerow(['trj ID','frame','x','y','y direction','x direction'])
	# temp      = []
	# for kk in range(np.size(obj2write.Trj_with_ID_frm,0)):
	# 	temp   =  obj2write.Trj_with_ID_frm[kk]
	# 	curkey =  obj2write.Trj_with_ID_frm[kk][0]
	# 	temp.append(obj2write.Ydir[curkey])
	# 	temp.append(obj2write.Xdir[curkey])
	# 	writer.writerow(temp)

	# pickle.dump( obj_pair.Trj_with_ID_frm, open( "./mat/20150222_Mat/singleListTrj.p", "wb" ) ) 
	# singleListTrj = pickle.load(open( "./mat/20150222_Mat/singleListTrj.p", "rb" ) )


	#=======visualize the pair relationship==============================================
	if isVisualize:
		isVideo  = True
		if isVideo:
			dataPath = DataPathobj.video 
			global cap
			cap       = cv2.VideoCapture(dataPath)
			cap.set ( cv2.cv.CV_CAP_PROP_POS_FRAMES ,0)
			status, firstfrm = cap.read()
			framenum = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
		else:
			image_list = sorted(glob.glob('../DoT/[email protected]/[email protected]_2015-06-16_16h03min52s762ms/*.jpg')) # only 2000 pictures
			firstfrm =cv2.imread(image_list[0])
			framenum = int(len(image_list))
		nrows = int(np.size(firstfrm,0))
		ncols = int(np.size(firstfrm,1))
		
		# plt.figure(1,figsize =[10,12])
		# plt.figure()
		# axL     = plt.subplot(1,1,1)
		# frame   = np.zeros([nrows,ncols,3]).astype('uint8')
		# im      = plt.imshow(np.zeros([nrows,ncols,3]))
		# plt.axis('off')
		# color_choice = np.array([np.random.randint(0,255) for _ in range(3*int(max(obj_pair2.globalID)))]).reshape(int(max(obj_pair2.globalID)),3)
		# colors  = lambda: np.random.rand(50)
		plt.figure('testing')


	# One random RGB row per global trajectory ID, used for drawing.
	color_choice = np.array([np.random.randint(0,255) for _ in range(3*int(max(obj_pair2.globalID)))]).reshape(int(max(obj_pair2.globalID)),3)
	savenameCooccur = os.path.join(DataPathobj.pairpath,'pair_relationship_overlap3s.csv')
	global writerCooccur
	writerCooccur   = csv.writer(open(savenameCooccur,'wb'))
	writerCooccur.writerow(['trj1 ID','frame','x','y','y direction','x direction','trj2 ID','frame','x','y','y direction','x direction'])

	savename2  = os.path.join(DataPathobj.pairpath,'pairs_ID_overlap3s.csv')
	global writer2
	writer2    = csv.writer(open(savename2,'wb'))
	writer2.writerow(['trj2 ID','trj2 ID'])

	# global outputFile
	# outputFile = open(savenameCooccur,'a')


	obj_pair2loop   = obj_pair2
	# Compare each trajectory only with the next 500 IDs to bound the
	# quadratic pairing cost.
	for ind1 in range(len(obj_pair2loop.globalID)-1):
		for ind2 in range(ind1+1, min(len(obj_pair2loop.globalID),ind1+500)):
			loopVehicleID1 = obj_pair2loop.globalID[ind1]
			loopVehicleID2 = obj_pair2loop.globalID[ind2]

			# sum == len presumably means every cluster size is 1, i.e. a
			# single-trj cluster — skip those; verify against clusterSize producer.
			if (sum(test_clusterSize[loopVehicleID1])== len(test_clusterSize[loopVehicleID1]))\
				or  (sum(test_clusterSize[loopVehicleID2])== len(test_clusterSize[loopVehicleID2])):
				# print "single trj as cluster!"
				continue

			# print "pairing: ",loopVehicleID1,' & ',loopVehicleID2
			if isVisualize:
				plt.cla()
				axL   = plt.subplot(1,1,1)
				global im
				im    = plt.imshow(np.zeros([nrows,ncols,3]))
				plt.axis('off')
			visualize_threshold  = fps*10 # only if a pair shared more than this many frames, show them
			pair_givenID_vis(loopVehicleID1, loopVehicleID2, obj_pair2loop,color_choice, isWrite = isWrite, \
							 isVisualize = isVisualize, visualize_threshold = visualize_threshold,overlap_pair_threshold = overlap_pair_threshold)
	"""use for signle testing in the end, show pairs given IDs"""
	# plt.figure('testing2')
	# axL         = plt.subplot(1,1,1)
	# im          = plt.imshow(np.zeros([nrows,ncols,3]))
	# plt.axis('off')
	# isWrite     = False
	# isVisualize = True
	# pair_givenID_vis(649, 703, obj_pair2loop,color_choice,isWrite, isVisualize ,visualize_threshold = 40)


	"""what IDs each frame has"""
def fit_extrapolate_main(dataSource, VideoIndex):
    """Smooth and filter raw KLT trajectories, one .mat truncation at a time.

    For each klt .mat file, builds spatially and temporally smoothed
    position and speed matrices, drops all-zero rows, filters out bad
    trajectories, and saves the result to the smooth path.  Truncations
    whose smoothed output already exists are skipped.

    Side effects: rebinds the module globals DataPathobj and Parameterobj.
    """
    # define start and end regions
    #Canal video's dimensions:
    # """(528, 704, 3)
    # start :<=100,
    # end: >=500,"""

    import DataPathclass
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource, VideoIndex)
    import parameterClass
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource, VideoIndex)

    # NOTE(review): start_Y/end_Y are not referenced later in this function.
    start_Y = 100
    end_Y = 500

    # matfilepath    = '/Users/Chenge/Desktop/testklt/'
    matfilepath = DataPathobj.kltpath
    matfiles = sorted(glob.glob(matfilepath + '*.mat'))
    # matfiles       = sorted(glob.glob(matfilepath + 'klt_*.mat'))
    # matfiles       = sorted(glob.glob(matfilepath + 'sim*.mat'))
    start_position = 0
    matfiles = matfiles[start_position:]

    # Truncation numbers already processed, parsed from the 3-digit
    # suffix of each saved file name (e.g. "...007.mat" -> 7).
    existingFiles = sorted(glob.glob(DataPathobj.smoothpath + '*.mat'))
    existingFileNames = []
    for jj in range(len(existingFiles)):
        existingFileNames.append(int(existingFiles[jj][-7:-4]))

    # for matidx,matfile in enumerate(matfiles):
    for matidx in range(len(matfiles)):
        if (matidx + 1) in existingFileNames:
            print "alredy processed ", str(matidx + 1)
            continue
        matfile = matfiles[matidx]
        # "if consecutive points are similar to each other, merge them, using one to represent"
        # didn't do this, smooth and resample instead
        print "reading data", matfile
        x, y, t, ptstrj = readData(matfile)
        print "get spatial and temporal smooth matrix"
        x_spatial_smooth_mtx, y_spatial_smooth_mtx, x_time_smooth_mtx, y_time_smooth_mtx, xspd_smooth_mtx, yspd_smooth_mtx = getSmoothMtx(
            x, y, t)
        """delete all-zero rows"""
        # Keep only rows with at least one nonzero x sample; the trailing
        # [0, :, :] drops the extra leading axis that indexing with the
        # np.where tuple introduces here.
        good_index_before_filtering = np.where(
            np.sum(x_spatial_smooth_mtx, 1) != 0)
        x_spatial_smooth_mtx = x_spatial_smooth_mtx[
            good_index_before_filtering, :][0, :, :]
        y_spatial_smooth_mtx = y_spatial_smooth_mtx[
            good_index_before_filtering, :][0, :, :]
        x_time_smooth_mtx = x_time_smooth_mtx[good_index_before_filtering, :][
            0, :, :]
        y_time_smooth_mtx = y_time_smooth_mtx[good_index_before_filtering, :][
            0, :, :]
        xspd_smooth_mtx = xspd_smooth_mtx[good_index_before_filtering, :][
            0, :, :]
        yspd_smooth_mtx = yspd_smooth_mtx[good_index_before_filtering, :][
            0, :, :]
        t = t[good_index_before_filtering, :][0, :, :]
        # plotTrj(x_smooth_mtx,y_smooth_mtx)

        print "filtering out bad trajectories"
        goodTrj = filtering(x_spatial_smooth_mtx, y_spatial_smooth_mtx,
                            xspd_smooth_mtx, yspd_smooth_mtx, t)

        # kmeansPolyCoeff(p3)
        # plotTrj(x_spatial_smooth_mtx,y_spatial_smooth_mtx,t,Trjchoice = goodTrj)
        print "saving=======!!"
        saveSmoothMat(x_time_smooth_mtx, y_time_smooth_mtx, xspd_smooth_mtx,
                      yspd_smooth_mtx, goodTrj, ptstrj, matfile)
# Example 10
def pair_main(dataSource, VideoIndex, folderName):
    """Build co-occurrence pairs of clustered trajectories and write CSVs.

    Loads per-cluster virtual-center trajectories, removes trajectories
    flagged as bad, rebuilds the trajectory object from the cleaned data
    and pickles it, then compares each trajectory against up to 500
    following ones, delegating the pair test / CSV writing / drawing to
    pair_givenID_vis.

    Side effects: rebinds module globals DataPathobj, Parameterobj,
    writerCooccur, writer2 (and cap/im when visualizing).  Relies on
    module-level flags isVisualize, isWrite and isAfterWarpping.
    """
    import DataPathclass
    global DataPathobj
    DataPathobj = DataPathclass.DataPath(dataSource, VideoIndex)
    import parameterClass
    global Parameterobj
    Parameterobj = parameterClass.parameter(dataSource, VideoIndex)

    # fps = 5
    # subSampRate = 6
    fps = Parameterobj.targetFPS
    # cap = cv2.VideoCapture(DataPathobj.video)
    # subSampRate = int(np.round(cap.get(cv2.cv.CV_CAP_PROP_FPS)))/Parameterobj.targetFPS
    # Assumes a 30 fps source video (cv2 probe above disabled) — TODO confirm.
    subSampRate = int(30) / Parameterobj.targetFPS
    # A pair must share at least this many frames (1 second) to count.
    overlap_pair_threshold = 1 * fps
    # if len(glob.glob(os.path.join(DataPathobj.pairpath,'obj_pair2.p')))!=0 and len(glob.glob(os.path.join(DataPathobj.pairpath,'all_final_clusterSize.p')))!=0:
    # 	print "already have obj_pair, loading...!"
    # 	obj_pair2 = pickle.load(open(os.path.join(DataPathobj.pairpath,'obj_pair2.p'),'rb'))
    # 	test_clusterSize = pickle.load( open( os.path.join(DataPathobj.pairpath,'all_final_clusterSize.p'), "rb" ) )
    # else:
    test_vctime, test_vcxtrj, test_vcytrj, test_clusterSize, image_list = prepare_data(
        isAfterWarpping, dataSource, folderName)
    obj_pair = Trj_class_and_func_definitions.TrjObj(test_vcxtrj,
                                                     test_vcytrj,
                                                     test_vctime,
                                                     subSampRate=subSampRate)
    # Trajectory IDs flagged as bad by the first pass; bad_IDs4 deliberately excluded.
    # badkeys  = obj_pair.bad_IDs1+obj_pair.bad_IDs2+obj_pair.bad_IDs4
    badkeys = obj_pair.bad_IDs1 + obj_pair.bad_IDs2

    for key in badkeys:
        del test_vctime[key]
        del test_vcxtrj[key]
        del test_vcytrj[key]
    clean_vctime = test_vctime
    clean_vcxtrj = test_vcxtrj
    clean_vcytrj = test_vcytrj
    print "trj remaining: ", str(len(clean_vctime))
    # rebuild this object using filtered data, should be no bad_IDs
    obj_pair2 = Trj_class_and_func_definitions.TrjObj(clean_vcxtrj,
                                                      clean_vcytrj,
                                                      clean_vctime,
                                                      subSampRate=subSampRate)
    pickle.dump(obj_pair2,
                open(os.path.join(DataPathobj.pairpath, 'obj_pair2.p'), 'wb'))
    pickle.dump(
        test_clusterSize,
        open(os.path.join(DataPathobj.pairpath, 'all_final_clusterSize.p'),
             "wb"))
    """ write clustered Trj infor(not-pairing): clusterID, virtual center X, vc Y, Y direction, X direction """
    # obj2write = obj_pair2
    # savename  = os.path.join(DataPathobj.pairpath,'Trj_with_ID_frm.csv')
    # writer    = csv.writer(open(savename,'wb'))
    # writer.writerow(['trj ID','frame','x','y','y direction','x direction'])
    # temp      = []
    # for kk in range(np.size(obj2write.Trj_with_ID_frm,0)):
    # 	temp   =  obj2write.Trj_with_ID_frm[kk]
    # 	curkey =  obj2write.Trj_with_ID_frm[kk][0]
    # 	temp.append(obj2write.Ydir[curkey])
    # 	temp.append(obj2write.Xdir[curkey])
    # 	writer.writerow(temp)

    # pickle.dump( obj_pair.Trj_with_ID_frm, open( "./mat/20150222_Mat/singleListTrj.p", "wb" ) )
    # singleListTrj = pickle.load(open( "./mat/20150222_Mat/singleListTrj.p", "rb" ) )

    #=======visualize the pair relationship==============================================
    if isVisualize:
        isVideo = True
        if isVideo:
            dataPath = DataPathobj.video
            global cap
            cap = cv2.VideoCapture(dataPath)
            cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 0)
            status, firstfrm = cap.read()
            framenum = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        else:
            image_list = sorted(
                glob.glob(
                    '../DoT/[email protected]/[email protected]_2015-06-16_16h03min52s762ms/*.jpg'
                ))  # only 2000 pictures
            firstfrm = cv2.imread(image_list[0])
            framenum = int(len(image_list))
        nrows = int(np.size(firstfrm, 0))
        ncols = int(np.size(firstfrm, 1))

        # plt.figure(1,figsize =[10,12])
        # plt.figure()
        # axL     = plt.subplot(1,1,1)
        # frame   = np.zeros([nrows,ncols,3]).astype('uint8')
        # im      = plt.imshow(np.zeros([nrows,ncols,3]))
        # plt.axis('off')
        # color_choice = np.array([np.random.randint(0,255) for _ in range(3*int(max(obj_pair2.globalID)))]).reshape(int(max(obj_pair2.globalID)),3)
        # colors  = lambda: np.random.rand(50)
        plt.figure('testing')

    # One random RGB row per global trajectory ID, used for drawing.
    color_choice = np.array([
        np.random.randint(0, 255)
        for _ in range(3 * int(max(obj_pair2.globalID)))
    ]).reshape(int(max(obj_pair2.globalID)), 3)
    savenameCooccur = os.path.join(DataPathobj.pairpath,
                                   'pair_relationship_overlap3s.csv')
    global writerCooccur
    writerCooccur = csv.writer(open(savenameCooccur, 'wb'))
    writerCooccur.writerow([
        'trj1 ID', 'frame', 'x', 'y', 'y direction', 'x direction', 'trj2 ID',
        'frame', 'x', 'y', 'y direction', 'x direction'
    ])

    savename2 = os.path.join(DataPathobj.pairpath, 'pairs_ID_overlap3s.csv')
    global writer2
    writer2 = csv.writer(open(savename2, 'wb'))
    writer2.writerow(['trj2 ID', 'trj2 ID'])

    # global outputFile
    # outputFile = open(savenameCooccur,'a')

    obj_pair2loop = obj_pair2
    # Compare each trajectory only with the next 500 IDs to bound the
    # quadratic pairing cost.
    for ind1 in range(len(obj_pair2loop.globalID) - 1):
        for ind2 in range(ind1 + 1, min(len(obj_pair2loop.globalID),
                                        ind1 + 500)):
            loopVehicleID1 = obj_pair2loop.globalID[ind1]
            loopVehicleID2 = obj_pair2loop.globalID[ind2]

            # sum == len presumably means every cluster size is 1, i.e. a
            # single-trj cluster — skip those; verify against clusterSize producer.
            if (sum(test_clusterSize[loopVehicleID1])== len(test_clusterSize[loopVehicleID1]))\
             or  (sum(test_clusterSize[loopVehicleID2])== len(test_clusterSize[loopVehicleID2])):
                # print "single trj as cluster!"
                continue

            # print "pairing: ",loopVehicleID1,' & ',loopVehicleID2
            if isVisualize:
                plt.cla()
                axL = plt.subplot(1, 1, 1)
                global im
                im = plt.imshow(np.zeros([nrows, ncols, 3]))
                plt.axis('off')
            visualize_threshold = fps * 10  # only if a pair shared more than this many frames, show them
            pair_givenID_vis(loopVehicleID1, loopVehicleID2, obj_pair2loop,color_choice, isWrite = isWrite, \
                 isVisualize = isVisualize, visualize_threshold = visualize_threshold,overlap_pair_threshold = overlap_pair_threshold)
    """use for signle testing in the end, show pairs given IDs"""
    # plt.figure('testing2')
    # axL         = plt.subplot(1,1,1)
    # im          = plt.imshow(np.zeros([nrows,ncols,3]))
    # plt.axis('off')
    # isWrite     = False
    # isVisualize = True
    # pair_givenID_vis(649, 703, obj_pair2loop,color_choice,isWrite, isVisualize ,visualize_threshold = 40)
    """what IDs each frame has"""