Code example #1
def mergeModel(sfm_data_dirA,
               sfm_data_dirB,
               locFolderB,
               outfile,
               ransacK=1.0,
               ransacRound=10000,
               inputImgDir="",
               minLimit=4,
               svdRatio=1.75):

    print "Loading sfm_data"
    sfm_dataB = FileUtils.loadjson(sfm_data_dirB)

    # read matching pairs from localization result
    imgnameB, matchlistB = readMatch(locFolderB)

    # get viewID from image name for model B
    viewIDB = imgnameToViewID(imgnameB, sfm_dataB)

    # get mapping between viewID,featID to 3D point ID
    viewFeatMapB = getViewFeatTo3DMap(sfm_dataB)

    # find consistent match between 3D of model B to 3D of model A
    print "Calculating consistent 3D matches"
    match3D_BA = getConsistent3DMatch(viewIDB, matchlistB, viewFeatMapB)
    print "Found " + str(len(match3D_BA)) + " consistent matches"

    # not enough matches
    if len(match3D_BA) <= 4 or len(match3D_BA) <= minLimit:
        return len(match3D_BA), [0]

    # load the larger model only after the match check, to save time when merging is not possible
    sfm_dataA = FileUtils.loadjson(sfm_data_dirA)

    # get 3D point. Note that element 0 of each pair in match3D_BA
    # is 3D pt ID of model B and element 1 is that of model A
    print "Load 3D points"
    pointA = get3DPointloc(sfm_dataA, [x[1] for x in match3D_BA])
    pointB = get3DPointloc(sfm_dataB, [x[0] for x in match3D_BA])

    pointAn = np.asarray(pointA, dtype=np.float).T
    pointBn = np.asarray(pointB, dtype=np.float).T

    # calculate the RANSAC threshold from the median of distances between
    # the 3D structure points of model A, scaled by ransacK
    print "Find transformation with RANSAC"
    # modified by T.Ishihara 2016.04.08
    # the median of camera positions merges too many points, so use the median of structure points instead
    #ransacThres = findMedianThres(sfm_dataA, ransacK)
    ransacThres = findMedianStructurePointsThres(sfm_dataA, ransacK)

    # TODO : replace with RANSAC similarity transform
    # find robust transformation
    M, inliers = ransacAffineTransform(pointAn, pointBn, ransacThres,
                                       ransacRound, svdRatio)
    # cannot find RANSAC transformation
    if (len(inliers) == 0):
        return len(match3D_BA), [0]
    print M

    # stop if not enough inliers
    sSvd = np.linalg.svd(M[0:3, 0:3], compute_uv=0)
    if len(inliers) <= 4 or sSvd[0] / sSvd[-1] > svdRatio:
        return len(inliers), M

    # perform merge
    # the last argument maps each inlier 3D point ID of model B to the corresponding ID of model A
    print "Merging sfm_data"
    merge_sfm_data(sfm_dataA, sfm_dataB, M,
                   {match3D_BA[x][0]: match3D_BA[x][1]
                    for x in inliers})

    # change input image folder
    if inputImgDir != "":
        sfm_dataA["root_path"] = inputImgDir

    # save json file
    print "Saving json file"
    FileUtils.savejson(sfm_dataA, outfile)

    # return number of inliers for transformation
    return len(inliers), M
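
A minimal usage sketch for this version of mergeModel. The paths below are hypothetical, and the call assumes it runs inside the mergeSfM module so that FileUtils and the helper functions (readMatch, ransacAffineTransform, merge_sfm_data, ...) are in scope:

# hypothetical paths; numInliers is the number of RANSAC inliers on success,
# or the number of consistent matches if the merge was aborted early
numInliers, M = mergeModel("modelA/sfm_data.json",
                           "modelB/sfm_data.json",
                           "modelB/loc",
                           "merged/sfm_data.json",
                           ransacK=1.0,
                           ransacRound=10000,
                           inputImgDir="merged/inputImg")
print "Inliers : " + str(numInliers)
print M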
Code example #2
def cleanSfM(sfm_data_path,matchesFile):
    
    sfm_data = FileUtils.loadjson(sfm_data_path)
    if (len(sfm_data["views"])==0):
        print "No views are used in reconstruction of " + sfm_data_path
        return [[],[]]
    if (len(sfm_data["extrinsics"])==0):
        print "No extrinsics are used in reconstruction of " + sfm_data_path
        return [[],[]]
    
    # get map from ID to index
    viewMap = {}
    for i in range(0,len(sfm_data["views"])):
        viewMap[sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"]] = i
                
    extMap = {}
    for i in range(0,len(sfm_data["extrinsics"])):
        extMap[sfm_data["extrinsics"][i]["key"]] = i
                
    strMap = {}
    for i in range(0,len(sfm_data["structure"])):
        strMap[sfm_data["structure"][i]["key"]] = i
    
    # find viewIDs of first and last frame used in reconstruction
    firstViewID = len(sfm_data["views"])
    lastViewID = 0
    firstExtID = min(extMap.keys())
    
    for i in range(0,len(sfm_data["views"])):
        viewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"]
        extID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"]
        
        if extID in extMap:
            if firstViewID > viewID:
                firstViewID = viewID
            if lastViewID < viewID:
                lastViewID = viewID
                
    if firstViewID >= lastViewID:
        print "No views are used in reconstruction of " + sfm_data_path
        return [[],[]]
    
    # get list of unused view back to front
    # and change the view Index
    unusedImgName = [[],[]]
    for i in range(len(sfm_data["views"])-1,lastViewID,-1):
        unusedImgName[1].append(sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])
        sfm_data["views"].pop(i)
    
    for i in range(lastViewID,firstViewID-1,-1):
        newViewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"]-firstViewID
        sfm_data["views"][i]["key"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"] = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"] - firstExtID
            
    for i in range(firstViewID-1,-1,-1):
        unusedImgName[0].append(sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])        
        sfm_data["views"].pop(i)
        
    # change extrinsics ID
    for i in range(0,len(sfm_data["extrinsics"])):
        sfm_data["extrinsics"][i]["key"] = sfm_data["extrinsics"][i]["key"]-firstExtID
        
    # change index of referred view in structure
    for i in range(0,len(sfm_data["structure"])):
        for j in range(0,len(sfm_data["structure"][i]["value"]["observations"])):
            sfm_data["structure"][i]["value"]["observations"][j]["key"] = \
                sfm_data["structure"][i]["value"]["observations"][j]["key"]-firstViewID
    
    # save jsonfile back
    FileUtils.savejson(sfm_data,sfm_data_path)
    
    # update matches file
    for matchfile in matchesFile:
        
        matchFileName = os.path.basename(matchfile)
        matchFileNameTmp = matchFileName + "".join(random.choice(string.lowercase) for i in range(10)) # random temporary file name
        matchDir = os.path.dirname(matchfile)
        
        fout = open(os.path.join(matchDir,matchFileNameTmp),"w")
        
        with open(matchfile,"r") as mfile:
            mode = 0
            write = False
            countLine = 0
            
            for line in mfile:
                
                line = line.strip()
                
                if mode == 0:
                    
                    line = line.split(" ")
                    
                    view1 = int(line[0])
                    view2 = int(line[1])
                    
                    if view1 < firstViewID or view1 > lastViewID or \
                        view2 < firstViewID or view2 > lastViewID:
                        write = False
                    else:
                        write = True
                    
                    if write:
                        # update viewID and write out
                        fout.write(str(int(line[0])-firstViewID))
                        fout.write(" ")
                        fout.write(str(int(line[1])-firstViewID))
                        fout.write("\n")
                    
                    countLine = 0
                    mode = 1
                    
                elif mode == 1:
                    
                    numMatch= int(line)
                    
                    if write:
                        # get number of matches and write out
                        fout.write(line + "\n")
                    
                    mode = 2
                    
                elif mode == 2:
                    
                    if write:
                        # write out matches
                        fout.write(line + "\n")
                    
                    countLine = countLine + 1
                    
                    if countLine == numMatch:
                        mode = 0
                        
        # close the rewritten file before swapping it in place of the original
        fout.close()
        os.rename(os.path.join(matchDir,matchFileName),os.path.join(matchDir,matchFileName+"_old"))
        os.rename(os.path.join(matchDir,matchFileNameTmp),os.path.join(matchDir,matchFileName))
    
    return unusedImgName
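
A minimal usage sketch with hypothetical paths. As the code above shows, the return value is a pair of lists of image file names: index 0 holds the frames dropped before the first reconstructed view, index 1 the frames dropped after the last one:

# hypothetical paths to the sfm_data.json and its matches files
unusedImgName = cleanSfM("Output/SfM/reconstruction/sfm_data.json",
                         ["Output/matches/matches.putative.txt",
                          "Output/matches/matches.f.txt"])
print "Frames removed before the first used view : " + str(unusedImgName[0])
print "Frames removed after the last used view : " + str(unusedImgName[1])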
Code example #3
def cleanSfM(sfm_data_path, matchesFile):

    sfm_data = FileUtils.loadjson(sfm_data_path)
    if (len(sfm_data["views"]) == 0):
        print "No views are used in reconstruction of " + sfm_data_path
        return [[], []]
    if (len(sfm_data["extrinsics"]) == 0):
        print "No extrinsics are used in reconstruction of " + sfm_data_path
        return [[], []]

    # get map from ID to index
    viewMap = {}
    for i in range(0, len(sfm_data["views"])):
        viewMap[sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]
                ["id_view"]] = i

    extMap = {}
    for i in range(0, len(sfm_data["extrinsics"])):
        extMap[sfm_data["extrinsics"][i]["key"]] = i

    strMap = {}
    for i in range(0, len(sfm_data["structure"])):
        strMap[sfm_data["structure"][i]["key"]] = i

    # find viewIDs of first and last frame used in reconstruction
    firstViewID = len(sfm_data["views"])
    lastViewID = 0
    firstExtID = min(extMap.keys())

    for i in range(0, len(sfm_data["views"])):
        viewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_view"]
        extID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"]

        if extID in extMap:
            if firstViewID > viewID:
                firstViewID = viewID
            if lastViewID < viewID:
                lastViewID = viewID

    if firstViewID >= lastViewID:
        print "No views are used in reconstruction of " + sfm_data_path
        return [[], []]

    # get list of unused view back to front
    # and change the view Index
    unusedImgName = [[], []]
    for i in range(len(sfm_data["views"]) - 1, lastViewID, -1):
        unusedImgName[1].append(
            sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])
        sfm_data["views"].pop(i)

    for i in range(lastViewID, firstViewID - 1, -1):
        newViewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_view"] - firstViewID
        sfm_data["views"][i]["key"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_view"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_pose"] = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
                "id_pose"] - firstExtID

    for i in range(firstViewID - 1, -1, -1):
        unusedImgName[0].append(
            sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])
        sfm_data["views"].pop(i)

    # change extrinsics ID
    for i in range(0, len(sfm_data["extrinsics"])):
        sfm_data["extrinsics"][i][
            "key"] = sfm_data["extrinsics"][i]["key"] - firstExtID

    # change index of referred view in structure
    for i in range(0, len(sfm_data["structure"])):
        for j in range(0,
                       len(sfm_data["structure"][i]["value"]["observations"])):
            sfm_data["structure"][i]["value"]["observations"][j]["key"] = \
                sfm_data["structure"][i]["value"]["observations"][j]["key"]-firstViewID

    # save jsonfile back
    FileUtils.savejson(sfm_data, sfm_data_path)

    # update matches file
    for matchfile in matchesFile:

        matchFileName = os.path.basename(matchfile)
        matchFileNameTmp = matchFileName + "".join(
            random.choice(string.lowercase) for i in range(10))  # random temporary file name
        matchDir = os.path.dirname(matchfile)

        fout = open(os.path.join(matchDir, matchFileNameTmp), "w")

        with open(matchfile, "r") as mfile:
            mode = 0
            write = False
            countLine = 0

            for line in mfile:

                line = line.strip()

                if mode == 0:

                    line = line.split(" ")

                    view1 = int(line[0])
                    view2 = int(line[1])

                    if view1 < firstViewID or view1 > lastViewID or \
                        view2 < firstViewID or view2 > lastViewID:
                        write = False
                    else:
                        write = True

                    if write:
                        # update viewID and write out
                        fout.write(str(int(line[0]) - firstViewID))
                        fout.write(" ")
                        fout.write(str(int(line[1]) - firstViewID))
                        fout.write("\n")

                    countLine = 0
                    mode = 1

                elif mode == 1:

                    numMatch = int(line)

                    if write:
                        # get number of matches and write out
                        fout.write(line + "\n")

                    mode = 2

                elif mode == 2:

                    if write:
                        # write out matches
                        fout.write(line + "\n")

                    countLine = countLine + 1

                    if countLine == numMatch:
                        mode = 0

        # close the rewritten file before swapping it in place of the original
        fout.close()
        os.rename(os.path.join(matchDir, matchFileName),
                  os.path.join(matchDir, matchFileName + "_old"))
        os.rename(os.path.join(matchDir, matchFileNameTmp),
                  os.path.join(matchDir, matchFileName))

    return unusedImgName
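
For reference, the mode 0/1/2 state machine above expects the matches file to be a sequence of plain-text blocks: a line with the two view IDs of an image pair, a line with the number of matches, then that many lines of feature-index pairs. A small illustrative block (all values made up) could be written like this:

# hypothetical example: views 3 and 7 share two feature matches
exampleBlock = ("3 7\n"      # mode 0: view IDs of the image pair
                "2\n"        # mode 1: number of matches that follow
                "10 25\n"    # mode 2: feature index in view 3, feature index in view 7
                "41 8\n")
with open("matches.example.txt", "w") as f:  # hypothetical file name
    f.write(exampleBlock)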
Code example #4
File: mergeSfM.py Project: hulop/SfMLocalization
def mergeModel(sfm_data_dirA, sfm_data_dirB, locFolderB, outfile, ransacThres, mergePointThres, ransacRoundMul=100, inputImgDir="", minLimit=4, svdRatio=1.75):
    
    print "Loading sfm_data"
    sfm_dataB = FileUtils.loadjson(sfm_data_dirB)
    
    # read matching pairs from localization result
    imgnameB, matchlistB = readMatch(locFolderB)
    
    # get viewID from image name for model B
    viewIDB = imgnameToViewID(imgnameB, sfm_dataB)

    # get mapping between viewID,featID to 3D point ID
    viewFeatMapB = getViewFeatTo3DMap(sfm_dataB)

    # find consistent match between 3D of model B to 3D of model A
    print "Calculating consistent 3D matches"
    match3D_BA = getConsistent3DMatch(viewIDB, matchlistB, viewFeatMapB)
    print "Found " + str(len(match3D_BA)) + " consistent matches"
    
    # not enough matches
    if len(match3D_BA) <= 4 or len(match3D_BA) <= minLimit:
        return len(match3D_BA), len(match3D_BA), np.asarray([])
 
    # load the larger model only after the match check, to save time when merging is not possible
    sfm_dataA = FileUtils.loadjson(sfm_data_dirA)
 
    # get 3D point. Note that element 0 of each pair in match3D_BA
    # is 3D pt ID of model B and element 1 is that of model A
    print "Load 3D points"
    pointA = get3DPointloc(sfm_dataA, [x[1] for x in match3D_BA])
    pointB = get3DPointloc(sfm_dataB, [x[0] for x in match3D_BA])
    
    pointAn = np.asarray(pointA, dtype=np.float).T
    pointBn = np.asarray(pointB, dtype=np.float).T
        
    # find robust transformation
    print "Find transformation with RANSAC"
    ransacRound = len(match3D_BA)*ransacRoundMul
    print "Number of RANSAC round : " + str(ransacRound)
    M, inliers = ransacTransform(pointAn, pointBn, ransacThres, ransacRound, svdRatio)
    
    # cannot find RANSAC transformation
    if (M.size==0):
        return len(match3D_BA), len(match3D_BA), np.asarray([])
    print M
    
    # stop if not enough inliers
    sSvd = np.linalg.svd(M[0:3,0:3],compute_uv=0)
    # fixed by T.Ishihara to use minLimit 2016.06.06
    #if len(inliers) <= 4 or sSvd[0]/sSvd[-1] > svdRatio:
    if len(inliers) <= minLimit or sSvd[0]/sSvd[-1] > svdRatio:
        return len(match3D_BA), len(inliers), M
        
    # perform merge
    # the last argument maps each inlier 3D point ID of model B to the corresponding ID of model A
    print "Merging sfm_data"
    # fixed by T. Ishihara: use separate thresholds for finding RANSAC inliers and for selecting the points to merge
    '''
    merge_sfm_data(sfm_dataA, sfm_dataB, M, {match3D_BA[x][0]: match3D_BA[x][1] for x in inliers})
    '''
    mergePointInliers = getInliersByAffineTransform(pointAn, pointBn, M, mergePointThres)
    merge_sfm_data(sfm_dataA, sfm_dataB, M, {match3D_BA[x][0]: match3D_BA[x][1] for x in mergePointInliers})
    
    # change input image folder
    if inputImgDir != "":
        sfm_dataA["root_path"] = inputImgDir
    
    # save json file
    print "Saving json file"
    FileUtils.savejson(sfm_dataA,outfile)
    
    # return number of inliers for transformation
    return len(match3D_BA), len(inliers), M
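
A minimal usage sketch for this variant, which takes explicit ransacThres and mergePointThres values instead of deriving a threshold from the model. Paths and threshold values are hypothetical and would need tuning to the scale of the reconstructions:

# hypothetical paths and thresholds
numMatch, numInlier, M = mergeModel("modelA/sfm_data.json",
                                    "modelB/sfm_data.json",
                                    "modelB/loc",
                                    "merged/sfm_data.json",
                                    ransacThres=0.01,
                                    mergePointThres=0.01,
                                    inputImgDir="merged/inputImg")
print "Consistent 3D matches : " + str(numMatch)
print "RANSAC inliers : " + str(numInlier)
print "Estimated transformation :"
print M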