def main(doTubeFormat, inFileName, outTrajIDfileName, featPath, nrTrajThresh4Tube=0): tube2trajIDs.check() outPath = os.path.dirname(outTrajIDfileName) if not os.path.exists( outPath ): os.makedirs(outPath) # read trajectory positions print '\tGet trajectory positions;', geoFeat = denseTraj.getFeatFromFileByName(featPath, 'geo') # if proposals are stored as Tubes already, read them if doTubeFormat: tubeProposals = TubeList() tubeProposals.readHDF5(inFileName) # initialize with a single name inHDF5fileClusts = ['tubes'] else: print '\tGet video dimensions;', vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo') xmax = vidInfo[1] ymax = vidInfo[2] # input cluster file inHDF5fileClusts = h5py.File( inFileName, 'r') # go over all features totNrProposals = 0 for featName in inHDF5fileClusts: if not doTubeFormat: # get the clustered proposals for this feature type in SLINK pointer-representation dset = inHDF5fileClusts[featName] mergedTracks = dset[()] # convert pointer-representation to tube proposals if len(mergedTracks.shape) > 1 and mergedTracks.shape[0] > 1: tubeProposals = denseTraj.createProposals(mergedTracks, geoFeat, nrTrajThresh4Tube, xmax, ymax) nrProposals = len(tubeProposals) print '\tFeature: "%s", number of proposals: %d;' % (featName,nrProposals) # if ok if nrProposals > 0: # write as traj IDs if totNrProposals==0: outHDF5file = h5py.File( outTrajIDfileName, 'w', compression="gzip", compression_opts=9) for j in range(nrProposals): trajIDs = tubeProposals[j].tube2trajIDs(geoFeat) outHDF5file.create_dataset(str(totNrProposals), data=trajIDs) totNrProposals += 1 print '\tWrite trajectory IDs to', outTrajIDfileName outHDF5file.close()
def main(inFileName, featPath, nrTrajThresh4Tube, outFileName): #print inFileName, featPath, nrTrajThresh4Tube, outFileName #sys.exit() outPath = os.path.dirname(outFileName) if not os.path.exists(outPath): os.makedirs(outPath) # output file print '\tWrite tubes to: %s' % (outFileName) outHDF5fileTubes = h5py.File(outFileName, 'w') # get the trajectory locations geoFeat = denseTraj.getFeatFromFileByName(featPath, 'geo') # get vid info (length, width, height) vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo') xmax = vidInfo[1] ymax = vidInfo[2] # input cluster file inHDF5fileClusts = h5py.File(inFileName, 'r') propOutstartID = 0 for featName in inHDF5fileClusts: # get the clustered proposals for this feature type in SLINK pointer-representation #featName = featNames[featNameID] #fnameSS = os.path.join(inPath, featName + suffixIn + '.npy') #mergedTracks = np.load( fnameSS ) dset = inHDF5fileClusts[featName] mergedTracks = dset[()] # convert pointer-representation to tube proposals and write to disk if len(mergedTracks.shape) > 1 and mergedTracks.shape[0] > 1: tubeProposals = denseTraj.createProposals(mergedTracks, geoFeat, nrTrajThresh4Tube, xmax, ymax) nrProposals = len(tubeProposals) print '\tFeature: "%s"; Number of proposals: %d' % (featName, nrProposals), # if OK if nrProposals > 0: # write to file #print '; writing proposals:', outPath tubeProposals.writeHDF5(outHDF5fileTubes, propOutstartID) propOutstartID += nrProposals print 'tot:', propOutstartID # close the file outHDF5fileTubes.close()
def main(inFileName, featPath, nrTrajThresh4Tube, outFileName):
    """Convert SLINK-clustered trajectories into tube proposals and store them as Tubes.

    Parameters:
        inFileName        -- input HDF5 file with one SLINK pointer-representation
                             clustering per feature name
        featPath          -- path to the dense-trajectory feature files
        nrTrajThresh4Tube -- minimum number of trajectories for a proposal tube
        outFileName       -- output HDF5 file the tube proposals are written to
    """
    # print inFileName, featPath, nrTrajThresh4Tube, outFileName
    # sys.exit()
    # make sure the output directory exists
    outPath = os.path.dirname(outFileName)
    if not os.path.exists(outPath):
        os.makedirs(outPath)
    # output file
    print "\tWrite tubes to: %s" % (outFileName)
    outHDF5fileTubes = h5py.File(outFileName, "w")
    # get the trajectory locations
    geoFeat = denseTraj.getFeatFromFileByName(featPath, "geo")
    # get vid info (length, width, height)
    vidInfo = denseTraj.getFeatFromFileByName(featPath, "vidinfo")
    xmax = vidInfo[1]
    ymax = vidInfo[2]
    # input cluster file
    # NOTE(review): this handle is never closed below — presumably harmless for a
    # short-lived script, but worth confirming
    inHDF5fileClusts = h5py.File(inFileName, "r")
    propOutstartID = 0
    for featName in inHDF5fileClusts:
        # get the clustered proposals for this feature type in SLINK pointer-representation
        # featName = featNames[featNameID]
        # fnameSS = os.path.join(inPath, featName + suffixIn + '.npy')
        # mergedTracks = np.load( fnameSS )
        dset = inHDF5fileClusts[featName]
        mergedTracks = dset[()]
        # convert pointer-representation to tube proposals and write to disk;
        # degenerate clusterings (1-D or single row) are skipped
        if len(mergedTracks.shape) > 1 and mergedTracks.shape[0] > 1:
            tubeProposals = denseTraj.createProposals(
                mergedTracks, geoFeat, nrTrajThresh4Tube, xmax, ymax)
            nrProposals = len(tubeProposals)
            print '\tFeature: "%s"; Number of proposals: %d' % (featName, nrProposals),
            # if OK
            if nrProposals > 0:
                # write to file, continuing the running proposal index
                # print '; writing proposals:', outPath
                tubeProposals.writeHDF5(outHDF5fileTubes, propOutstartID)
                propOutstartID += nrProposals
    print "tot:", propOutstartID
    # close the file
    outHDF5fileTubes.close()
def main(doTubeFormat, inFileName, outIoUfile, gtPath, nrTrajThresh4Tube=-1, featPath=''): # check if cython is used tubeIoU.check() # read ground truth tubes print '\tRead ground truth;', gtTubes = TubeList() gtTubes.readHDF5(gtPath) outPath = os.path.dirname(outIoUfile) if not os.path.exists( outPath ): os.makedirs(outPath) # if proposals are stored as Tubes already, read them if doTubeFormat: tubeProposals = TubeList() tubeProposals.readHDF5(inFileName) # initialize with a single name inHDF5fileClusts = ['tubes'] else: # otherwise, read trajectory positions and vidinfo to generate proposals later print '\tGet trajectory positions;', geoFeat = denseTraj.getFeatFromFileByName(featPath, 'geo') print '\tGet video dimensions;', vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo') xmax = vidInfo[1] ymax = vidInfo[2] # input cluster file inHDF5fileClusts = h5py.File( inFileName, 'r') aboMax = [] # go over all features for featName in inHDF5fileClusts: if not doTubeFormat: # get the clustered proposals for this feature type in SLINK pointer-representation dset = inHDF5fileClusts[featName] mergedTracks = dset[()] # convert pointer-representation to tube proposals if len(mergedTracks.shape) > 1 and mergedTracks.shape[0] > 1: tubeProposals = denseTraj.createProposals(mergedTracks, geoFeat, nrTrajThresh4Tube, xmax, ymax) nrProposals = len(tubeProposals) print '\tFeature: "%s", number of proposals: %d;' % (featName,nrProposals), # if ok if nrProposals > 0: # get Intersection over Union scores aboMatCur = tubeProposals.computeTubeOverlap(gtTubes) # if first feature, write to new file otherwise append to existing file if aboMax == []: fileOut = open(outIoUfile, 'w') else: fileOut = open(outIoUfile, 'a') # write scores for prop in range(aboMatCur.shape[0]): for score in aboMatCur[prop,:]: fileOut.write('%f ' % score) fileOut.write('\n') fileOut.close() # keep track of the maximum score curMax = np.max(aboMatCur, axis=0) if aboMax == []: aboMax = curMax else: aboMax = 
np.maximum( aboMax, curMax) print 'Best IoUs:', curMax, '; Best so far:', aboMax print '\tWriting IoU scores to', outIoUfile if not doTubeFormat: inHDF5fileClusts.close()
def main(featPath, nrSpatNeighbors, isTrimmedVideo, outFileName):
    """main function

    Cluster dense trajectories with SLINK for every feature (combination) in the
    module-level `featNames`, writing one pointer-representation dataset per
    feature name into the HDF5 file `outFileName`.

    Parameters:
        featPath        -- path to the dense-trajectory feature files
        nrSpatNeighbors -- number of spatial nearest neighbors for the graph
        isTrimmedVideo  -- trimmed videos use Z-normalization and no temporal
                           scaling; untrimmed use min-max normalization
        outFileName     -- output HDF5 file (appended to, feature by feature)
    """
    if isTrimmedVideo:
        featNorm = "Z"
        tempScale = 0.0
    else:
        featNorm = "MiMaN"
        tempScale = 1.0
    # simple timing
    tic = time.time()
    # a bool to check if the spatial neighborhood is already computed
    spatFeatComputed = False
    # a bool to check if there were enough trajectories to cluster
    enoughTrajectories = True
    # mapping from feature to similarities (to avoid recomputing)
    feat2neighborSims = {}
    # if some features are already computed, only compute missing features
    doneFeatNames = []
    if not os.path.exists(outFileName):
        # fresh output file
        outHDF5file = h5py.File(outFileName, 'w')
        outHDF5file.close()
        print '\twriting features to %s' % outFileName
    else:
        try:
            # resume: read which features already exist
            print '\tadding (possible) missing features to %s' % outFileName
            inHDF5file = h5py.File(outFileName, 'r')
            doneFeatNames = inHDF5file.keys()
            inHDF5file.close()
        except IOError as e:
            # existing file unreadable/corrupt: start over
            outHDF5file = h5py.File(outFileName, 'w')
            outHDF5file.close()
            print '\toverwriting features to %s' % outFileName
    # for a given feature (or feature combination, a combination has a '-' in it)
    for featName in featNames:
        #print 'featName "%s"' % featName
        # if featName already computed, skip it.
        if featName not in doneFeatNames:
            if enoughTrajectories:
                #print 'do', featName
                #sys.exit()
                #print featName.split('-')
                # the spatial neighbor graph is shared by all features;
                # compute it only once
                if not spatFeatComputed:
                    feat = denseTraj.getFeatFromFileByName(featPath, 'spat', tempScale)
                    nrTracks = feat.shape[0]
                    if nrTracks < nrSpatNeighbors:
                        print featName, 'not enough tracks to continue:', nrTracks,
                        enoughTrajectories = False
                        # NOTE(review): this `continue` skips the write below for the
                        # CURRENT feature, so it is never marked done; later features
                        # still get a -1 dataset — confirm this asymmetry is intended
                        continue
                    #vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo')
                    #simMatIDs, simMat = tubeClust.getTrackNeighborsYaelYnumpyChunked(feat, nrSpatNeighbors, int(vidInfo[0]))
                    simMatIDs, _simMat = tubeClust.getTrackNeighborsYaelYnumpy(feat, nrSpatNeighbors)
                    neighborGraph = tubeClust.getNeighborGraph(simMatIDs, nrTracks)
                    spatFeatComputed = True
            if enoughTrajectories:
                # combined features are separated with a dash "-"
                combiFeatNames = featName.split('-')
                # first do the first feature
                if combiFeatNames[0] not in feat2neighborSims.keys():
                    feat = denseTraj.getFeatFromFileByName(featPath, combiFeatNames[0], tempScale)
                    neighborSims = tubeClust.getNeighborSimilarities(neighborGraph, feat, featName2DistFun[combiFeatNames[0]])
                    # normalize similarities according to the video type
                    if featNorm == 'Z':
                        neighborSims = tubeClust.normalizeNeighborSimilarities(neighborSims)
                    if featNorm == 'MiMaN':
                        neighborSims = tubeClust.normalizeNeighborSimilaritiesMiMa(neighborSims)
                    # cache for reuse by later feature combinations
                    feat2neighborSims[combiFeatNames[0]] = neighborSims
                else:
                    neighborSims = feat2neighborSims[combiFeatNames[0]]
                # add the scores of the to-be-combined features
                for indiFeat in combiFeatNames[1:]:
                    #print indiFeat,
                    if indiFeat not in feat2neighborSims.keys():
                        feat = denseTraj.getFeatFromFileByName(featPath, indiFeat, tempScale)
                        neighborSimsIndi = tubeClust.getNeighborSimilarities(neighborGraph, feat, featName2DistFun[indiFeat])
                        if featNorm == 'Z':
                            neighborSimsIndi = tubeClust.normalizeNeighborSimilarities(neighborSimsIndi)
                        if featNorm == 'MiMaN':
                            neighborSimsIndi = tubeClust.normalizeNeighborSimilaritiesMiMa(neighborSimsIndi)
                        feat2neighborSims[indiFeat] = neighborSimsIndi
                    else:
                        neighborSimsIndi = feat2neighborSims[indiFeat]
                    # combine features by multiplying their similarities
                    neighborSims = neighborSims * neighborSimsIndi
                # single-link clustering; result is the SLINK pointer representation
                Lambda, Pi = slink.SSclustSlink(neighborGraph, neighborSims, nrTracks)
                mergedTracks = tubeClust.convertPointerRepresentation(np.array(Lambda), Pi, nrTracks)
                toc = time.time()
                print '; Time: %.2fs' % (toc-tic)
            if not enoughTrajectories:
                # sentinel value: too few trajectories to cluster
                mergedTracks = -1
            print '\twrite to: %s, "%s"' % (outFileName, featName)
            # append this feature's result to the output file
            outHDF5file = h5py.File(outFileName, 'a')
            outHDF5file.create_dataset(featName, data=mergedTracks)
            outHDF5file.close()
def main(doTubeFormat, inFileName, outIoUfile, gtPath, nrTrajThresh4Tube=-1, featPath=''): # check if cython is used tubeIoU.check() # read ground truth tubes print '\tRead ground truth;', gtTubes = TubeList() gtTubes.readHDF5(gtPath) outPath = os.path.dirname(outIoUfile) if not os.path.exists(outPath): os.makedirs(outPath) # if proposals are stored as Tubes already, read them if doTubeFormat: tubeProposals = TubeList() tubeProposals.readHDF5(inFileName) # initialize with a single name inHDF5fileClusts = ['tubes'] else: # otherwise, read trajectory positions and vidinfo to generate proposals later print '\tGet trajectory positions;', geoFeat = denseTraj.getFeatFromFileByName(featPath, 'geo') print '\tGet video dimensions;', vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo') xmax = vidInfo[1] ymax = vidInfo[2] # input cluster file inHDF5fileClusts = h5py.File(inFileName, 'r') aboMax = [] # go over all features for featName in inHDF5fileClusts: if not doTubeFormat: # get the clustered proposals for this feature type in SLINK pointer-representation dset = inHDF5fileClusts[featName] mergedTracks = dset[()] # convert pointer-representation to tube proposals if len(mergedTracks.shape) > 1 and mergedTracks.shape[0] > 1: tubeProposals = denseTraj.createProposals( mergedTracks, geoFeat, nrTrajThresh4Tube, xmax, ymax) nrProposals = len(tubeProposals) print '\tFeature: "%s", number of proposals: %d;' % (featName, nrProposals), # if ok if nrProposals > 0: # get Intersection over Union scores aboMatCur = tubeProposals.computeTubeOverlap(gtTubes) # if first feature, write to new file otherwise append to existing file if aboMax == []: fileOut = open(outIoUfile, 'w') else: fileOut = open(outIoUfile, 'a') # write scores for prop in range(aboMatCur.shape[0]): for score in aboMatCur[prop, :]: fileOut.write('%f ' % score) fileOut.write('\n') fileOut.close() # keep track of the maximum score curMax = np.max(aboMatCur, axis=0) if aboMax == []: aboMax = curMax else: aboMax = 
np.maximum(aboMax, curMax) print 'Best IoUs:', curMax, '; Best so far:', aboMax print '\tWriting IoU scores to', outIoUfile if not doTubeFormat: inHDF5fileClusts.close()
def main(doTubeFormat, inFileName, outTrajIDfileName, featPath, nrTrajThresh4Tube=0): tube2trajIDs.check() outPath = os.path.dirname(outTrajIDfileName) if not os.path.exists(outPath): os.makedirs(outPath) # read trajectory positions print '\tGet trajectory positions;', geoFeat = denseTraj.getFeatFromFileByName(featPath, 'geo') # if proposals are stored as Tubes already, read them if doTubeFormat: tubeProposals = TubeList() tubeProposals.readHDF5(inFileName) # initialize with a single name inHDF5fileClusts = ['tubes'] else: print '\tGet video dimensions;', vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo') xmax = vidInfo[1] ymax = vidInfo[2] # input cluster file inHDF5fileClusts = h5py.File(inFileName, 'r') # go over all features totNrProposals = 0 for featName in inHDF5fileClusts: if not doTubeFormat: # get the clustered proposals for this feature type in SLINK pointer-representation dset = inHDF5fileClusts[featName] mergedTracks = dset[()] # convert pointer-representation to tube proposals if len(mergedTracks.shape) > 1 and mergedTracks.shape[0] > 1: tubeProposals = denseTraj.createProposals( mergedTracks, geoFeat, nrTrajThresh4Tube, xmax, ymax) nrProposals = len(tubeProposals) print '\tFeature: "%s", number of proposals: %d;' % (featName, nrProposals) # if ok if nrProposals > 0: # write as traj IDs if totNrProposals == 0: outHDF5file = h5py.File(outTrajIDfileName, 'w', compression="gzip", compression_opts=9) for j in range(nrProposals): trajIDs = tubeProposals[j].tube2trajIDs(geoFeat) outHDF5file.create_dataset(str(totNrProposals), data=trajIDs) totNrProposals += 1 print '\tWrite trajectory IDs to', outTrajIDfileName outHDF5file.close()
def main(featPath, nrSpatNeighbors, isTrimmedVideo, outFileName):
    """main function

    Cluster dense trajectories with SLINK for every feature (combination) in the
    module-level `featNames`, writing one pointer-representation dataset per
    feature name into the HDF5 file `outFileName`.

    Parameters:
        featPath        -- path to the dense-trajectory feature files
        nrSpatNeighbors -- number of spatial nearest neighbors for the graph
        isTrimmedVideo  -- trimmed videos use Z-normalization and no temporal
                           scaling; untrimmed use min-max normalization
        outFileName     -- output HDF5 file (appended to, feature by feature)
    """
    if isTrimmedVideo:
        featNorm = "Z"
        tempScale = 0.0
    else:
        featNorm = "MiMaN"
        tempScale = 1.0
    # simple timing
    tic = time.time()
    # a bool to check if the spatial neighborhood is already computed
    spatFeatComputed = False
    # a bool to check if there were enough trajectories to cluster
    enoughTrajectories = True
    # mapping from feature to similarities (to avoid recomputing)
    feat2neighborSims = {}
    # if some features are already computed, only compute missing features
    doneFeatNames = []
    if not os.path.exists(outFileName):
        # fresh output file
        outHDF5file = h5py.File(outFileName, 'w')
        outHDF5file.close()
        print '\twriting features to %s' % outFileName
    else:
        try:
            # resume: read which features already exist
            print '\tadding (possible) missing features to %s' % outFileName
            inHDF5file = h5py.File(outFileName, 'r')
            doneFeatNames = inHDF5file.keys()
            inHDF5file.close()
        except IOError as e:
            # existing file unreadable/corrupt: start over
            outHDF5file = h5py.File(outFileName, 'w')
            outHDF5file.close()
            print '\toverwriting features to %s' % outFileName
    # for a given feature (or feature combination, a combination has a '-' in it)
    for featName in featNames:
        #print 'featName "%s"' % featName
        # if featName already computed, skip it.
        if featName not in doneFeatNames:
            if enoughTrajectories:
                #print 'do', featName
                #sys.exit()
                #print featName.split('-')
                # the spatial neighbor graph is shared by all features;
                # compute it only once
                if not spatFeatComputed:
                    feat = denseTraj.getFeatFromFileByName(
                        featPath, 'spat', tempScale)
                    nrTracks = feat.shape[0]
                    if nrTracks < nrSpatNeighbors:
                        print featName, 'not enough tracks to continue:', nrTracks,
                        enoughTrajectories = False
                        # NOTE(review): this `continue` skips the write below for the
                        # CURRENT feature, so it is never marked done; later features
                        # still get a -1 dataset — confirm this asymmetry is intended
                        continue
                    #vidInfo = denseTraj.getFeatFromFileByName(featPath, 'vidinfo')
                    #simMatIDs, simMat = tubeClust.getTrackNeighborsYaelYnumpyChunked(feat, nrSpatNeighbors, int(vidInfo[0]))
                    simMatIDs, _simMat = tubeClust.getTrackNeighborsYaelYnumpy(
                        feat, nrSpatNeighbors)
                    neighborGraph = tubeClust.getNeighborGraph(
                        simMatIDs, nrTracks)
                    spatFeatComputed = True
            if enoughTrajectories:
                # combined features are separated with a dash "-"
                combiFeatNames = featName.split('-')
                # first do the first feature
                if combiFeatNames[0] not in feat2neighborSims.keys():
                    feat = denseTraj.getFeatFromFileByName(
                        featPath, combiFeatNames[0], tempScale)
                    neighborSims = tubeClust.getNeighborSimilarities(
                        neighborGraph, feat, featName2DistFun[combiFeatNames[0]])
                    # normalize similarities according to the video type
                    if featNorm == 'Z':
                        neighborSims = tubeClust.normalizeNeighborSimilarities(
                            neighborSims)
                    if featNorm == 'MiMaN':
                        neighborSims = tubeClust.normalizeNeighborSimilaritiesMiMa(
                            neighborSims)
                    # cache for reuse by later feature combinations
                    feat2neighborSims[combiFeatNames[0]] = neighborSims
                else:
                    neighborSims = feat2neighborSims[combiFeatNames[0]]
                # add the scores of the to-be-combined features
                for indiFeat in combiFeatNames[1:]:
                    #print indiFeat,
                    if indiFeat not in feat2neighborSims.keys():
                        feat = denseTraj.getFeatFromFileByName(
                            featPath, indiFeat, tempScale)
                        neighborSimsIndi = tubeClust.getNeighborSimilarities(
                            neighborGraph, feat, featName2DistFun[indiFeat])
                        if featNorm == 'Z':
                            neighborSimsIndi = tubeClust.normalizeNeighborSimilarities(
                                neighborSimsIndi)
                        if featNorm == 'MiMaN':
                            neighborSimsIndi = tubeClust.normalizeNeighborSimilaritiesMiMa(
                                neighborSimsIndi)
                        feat2neighborSims[indiFeat] = neighborSimsIndi
                    else:
                        neighborSimsIndi = feat2neighborSims[indiFeat]
                    # combine features by multiplying their similarities
                    neighborSims = neighborSims * neighborSimsIndi
                # single-link clustering; result is the SLINK pointer representation
                Lambda, Pi = slink.SSclustSlink(neighborGraph, neighborSims, nrTracks)
                mergedTracks = tubeClust.convertPointerRepresentation(
                    np.array(Lambda), Pi, nrTracks)
                toc = time.time()
                print '; Time: %.2fs' % (toc - tic)
            if not enoughTrajectories:
                # sentinel value: too few trajectories to cluster
                mergedTracks = -1
            print '\twrite to: %s, "%s"' % (outFileName, featName)
            # append this feature's result to the output file
            outHDF5file = h5py.File(outFileName, 'a')
            outHDF5file.create_dataset(featName, data=mergedTracks)
            outHDF5file.close()