def __init__(self, inputPath, outputPath, mInputPath, mSfMPath, mMatchesPath, mCsvPath, mInputImgPath, workspacePath, minReconFrame=25):
    """Set up merge working directories and register every project found
    directly under inputPath as one sfmModel via self.addModel."""
    # bookkeeping for the merge process
    self.sfmModel = []  # sfmModel objects queued for merging
    self.mSfMPath = mSfMPath  # SfM path holding folders of merged models
    self.mMatchesPath = mMatchesPath  # matches path
    self.mCsvPath = mCsvPath  # csv path
    self.mInputImgPath = mInputImgPath  # input images path
    self.mInputPath = mInputPath
    self.nMergedModel = 0  # number of merges performed so far
    self.badMatches = []  # pairs already known to match badly
    self.workspacePath = workspacePath

    # make sure every working directory exists up front
    for workDir in (self.mSfMPath, self.mMatchesPath, self.mCsvPath,
                    self.mInputImgPath, self.mInputPath):
        FileUtils.makedir(workDir)

    # every sub-folder of inputPath is treated as one project
    if os.path.isdir(inputPath):
        for projectName in sorted(os.listdir(inputPath)):
            self.addModel(
                    projectName,
                    os.path.join(inputPath, projectName),
                    os.path.join(outputPath, projectName),
                    minimumFrame=minReconFrame)
 def __init__(self, inputPath, outputPath, mInputPath, mSfMPath, mMatchesPath, mCsvPath, mInputImgPath, 
              workspacePath, validMergeRansacThresK=5, ransacStructureThresK=10,
              mergeStructureThresK=0.01, minReconFrame=25):
     self.sfmModel = [] # list of sfmModel objects for merging
     self.mSfMPath = mSfMPath # sfm path containing multiple folder of merged models
     self.mMatchesPath = mMatchesPath # matches path
     self.mCsvPath = mCsvPath # csv path
     self.mInputImgPath = mInputImgPath # input images path
     self.mInputPath = mInputPath
     self.nMergedModel = 0 # number of merged performed
     self.badMatches = [] # keeps pair of bad matches
     self.workspacePath = workspacePath
     
     FileUtils.makedir(self.mSfMPath)
     FileUtils.makedir(self.mMatchesPath)
     FileUtils.makedir(self.mCsvPath)
     FileUtils.makedir(self.mInputImgPath)
     FileUtils.makedir(self.mInputPath)
     
     # list all folder as projects
     if os.path.isdir(inputPath):
         
         listDir = os.listdir(inputPath)
         
         for folder in sorted(listDir):
             
             # add model to self
             self.addModel(folder, os.path.join(inputPath,folder), os.path.join(outputPath,folder),
                     minReconFrame, validMergeRansacThresK, ransacStructureThresK, mergeStructureThresK)
# ---- Example #3 (score: 0) ----
def modelMergeCheckLocal(sfm_data_path, sfm_locOut, medThres):
    """Count localized images against the SfM model.

    Returns (countFile, countAgree): the number of localization results
    with a finite distance to their 3D view position in the model, and
    how many of those lie within medThres of it.
    """
    sfm_data = FileUtils.loadjson(sfm_data_path)

    # gather image names and localized positions ("t") from the json results
    imgName = []
    imgLoc = []
    for locFileName in os.listdir(sfm_locOut):
        if locFileName[-4:] != "json":
            continue

        locJsonDict = FileUtils.loadjson(os.path.join(sfm_locOut, locFileName))
        # results without a translation cannot be compared; skip them
        if "t" in locJsonDict:
            imgName.append(os.path.basename(locJsonDict["filename"]))
            imgLoc.append(locJsonDict["t"])

    # map image names to view IDs, then to their 3D positions in the model
    imgID = imgnameToViewID(imgName, sfm_data)
    imgSfMLoc = get3DViewloc(sfm_data, imgID)

    countFile = 0
    countAgree = 0
    for locatedPos, modelPos in zip(imgLoc, imgSfMLoc):
        dist = np.linalg.norm(np.array(locatedPos) - np.array(modelPos))
        # infinite distance marks views without a valid model position
        if dist < float("inf"):
            countFile += 1
            if dist < medThres:
                countAgree += 1

    return countFile, countAgree
# ---- Example #4 (score: 0) ----
def modelMergeCheckLocal(sfm_data_path, sfm_locOut, medThres):
    """Count localized images against the SfM model.

    Returns (countFile, countAgree): the number of localization results
    with a finite distance to their 3D view position in the model, and
    how many of those lie within medThres of it.
    """
    # load sfm_data
    sfm_data = FileUtils.loadjson(sfm_data_path)

    # collect all image names and locations
    imgName = []
    imgLoc = []
    for filename in os.listdir(sfm_locOut):

        if filename[-4:] != "json":
            continue

        locJsonDict = FileUtils.loadjson(os.path.join(sfm_locOut, filename))
        # Fix: a localization result without a translation ("t") used to
        # raise KeyError here; skip it instead, matching the sibling
        # implementation of this function elsewhere in the file.
        if "t" in locJsonDict:
            imgName.append(os.path.basename(locJsonDict["filename"]))
            imgLoc.append(locJsonDict["t"])

    # map image names to view IDs, then to their 3D positions in the model
    imgID = imgnameToViewID(imgName, sfm_data)
    imgSfMLoc = get3DViewloc(sfm_data, imgID)

    # calculate distance and count agreement
    countFile = 0
    countAgree = 0

    for j in range(0, len(imgLoc)):
        dist = np.linalg.norm(np.array(imgLoc[j]) - np.array(imgSfMLoc[j]))
        # infinite distance marks views without a valid model position
        if dist < float("inf"):
            countFile = countFile + 1

            if dist < medThres:
                countAgree = countAgree + 1

    return countFile, countAgree
def copyOriginalFiles(inputDir, outputDir):
    """Copy input images, csv files, and feature/descriptor files from the
    per-video project folders under inputDir into outputDir's merged layout."""
    # create the destination layout where missing
    for destDir in (os.path.join(outputDir, "Input"),
                    os.path.join(outputDir, "Input", "inputImg"),
                    os.path.join(outputDir, "Input", "csv"),
                    os.path.join(outputDir, "Output", "matches")):
        if not os.path.isdir(destDir):
            FileUtils.makedir(destDir)

    # (source glob, destination directory) pairs, copied in order
    copyJobs = [
        (os.path.join(inputDir, "Input", "*", "inputImg", "*"),
         os.path.join(outputDir, "Input", "inputImg")),
        (os.path.join(inputDir, "Input", "*", "csv", "*"),
         os.path.join(outputDir, "Input", "csv")),
        (os.path.join(inputDir, "Output", "*", "matches", "*.desc"),
         os.path.join(outputDir, "Output", "matches")),
        (os.path.join(inputDir, "Output", "*", "matches", "*.feat"),
         os.path.join(outputDir, "Output", "matches")),
        (os.path.join(inputDir, "Output", "final", "Output", "matches",
                      "image_describer.txt"),
         os.path.join(outputDir, "Output", "matches")),
    ]
    for srcGlob, destDir in copyJobs:
        os.system("cp --remove-destination " + FILE_COPY_OPTION + " " +
                  srcGlob + " " + destDir)
# ---- Example #6 (score: 0) ----
def calculateAverageBOW(sfmDataFile, matchesFolLoc):
    """Return the mean bag-of-words vector over all views of an SfM model.

    Loads the .bow file of every view listed in sfmDataFile (one per view
    image, stored in matchesFolLoc) and averages them. Returns None when
    the model has no views.

    Bug fix: the division by the view count used to sit INSIDE the loop,
    re-scaling the running sum on every iteration, so the result was not
    an average. The sum is now divided exactly once, after the loop.
    """
    sfmData = FileUtils.loadjson(sfmDataFile)

    avgBow = None
    for view in sfmData["views"]:
        viewImage = view["value"]["ptr_wrapper"]["data"]["filename"]
        viewBow = os.path.join(matchesFolLoc, os.path.splitext(viewImage)[0] + ".bow")
        bowvec = FileUtils.loadBinMat(viewBow)
        if avgBow is None:
            avgBow = bowvec
        else:
            avgBow += bowvec

    # divide once by the number of views to obtain the mean
    if avgBow is not None:
        avgBow /= len(sfmData["views"])

    return avgBow
def calculateAverageBOW(sfmDataFile, matchesFolLoc):
    """Return the mean bag-of-words vector over all views of an SfM model.

    Loads the .bow file of every view listed in sfmDataFile (one per view
    image, stored in matchesFolLoc) and averages them. Returns None when
    the model has no views.

    Bug fix: the division by the view count used to sit INSIDE the loop,
    re-scaling the running sum on every iteration, so the result was not
    an average. The sum is now divided exactly once, after the loop.
    """
    sfmData = FileUtils.loadjson(sfmDataFile)

    avgBow = None
    for view in sfmData["views"]:
        viewImage = view["value"]["ptr_wrapper"]["data"]["filename"]
        viewBow = os.path.join(matchesFolLoc,
                               os.path.splitext(viewImage)[0] + ".bow")
        bowvec = FileUtils.loadBinMat(viewBow)
        if avgBow is None:
            avgBow = bowvec
        else:
            avgBow += bowvec

    # divide once by the number of views to obtain the mean
    if avgBow is not None:
        avgBow /= len(sfmData["views"])

    return avgBow
# ---- Example #8 (score: 0) ----
    def __init__(self,
                 name,
                 imgFolLoc,
                 csvFolLoc,
                 matchesFolLoc,
                 locFolLoc,
                 sfm_dataLoc,
                 validMergeRansacThres=-1,
                 validMergeRansacThresK=-1,
                 ransacStructureThres=-1,
                 ransacStructureThresK=-1,
                 mergeStructureThres=-1,
                 mergeStructureThresK=-1):
        """Hold one SfM project and derive its merge thresholds.

        Each threshold may be supplied directly (…Thres) or as a
        multiplier (…ThresK) used to derive it from the model's median
        statistics; at least one of each pair must be given (-1 means
        "not provided").
        """
        # every threshold pair needs at least one usable value
        if (validMergeRansacThres == -1 and validMergeRansacThresK == -1):
            print "error : invalid argument for sfmModel valid merge ransac"
            sys.exit()
        if (ransacStructureThres == -1 and ransacStructureThresK == -1):
            print "error : invalid argument for sfmModel structure ransac"
            sys.exit()
        if (mergeStructureThres == -1 and mergeStructureThresK == -1):
            print "error : invalid argument for sfmModel structure merge"
            sys.exit()

        self.name = name  # folder name
        self.mergeOrder = name  # tree-like structure recording merge order
        self.imgFolLoc = imgFolLoc  # input image folder
        self.csvFolLoc = csvFolLoc  # csv folder
        self.matchesFolLoc = matchesFolLoc  # matches folder with descriptors
        self.locFolLoc = locFolLoc  # localization result folder
        self.sfm_dataLoc = sfm_dataLoc  # path of sfm_data.json

        # derive reconstructed frames and thresholds from sfm_data.json
        if self.sfm_dataLoc != "":
            sfm_data = FileUtils.loadjson(self.sfm_dataLoc)
            # a view counts as reconstructed when its pose has extrinsics
            posedKeys = [extrinsic["key"] for extrinsic in sfm_data["extrinsics"]]
            self.reconFrame = [
                view["value"]["ptr_wrapper"]["data"]["id_view"]
                for view in sfm_data["views"]
                if view["value"]["ptr_wrapper"]["data"]["id_pose"] in posedKeys
            ]

            # K > 0 selects the data-derived threshold, else the explicit one
            if validMergeRansacThresK > 0:
                self.validMergeRansacThres = mergeSfM.findMedianThres(
                    sfm_data, validMergeRansacThresK)
            else:
                self.validMergeRansacThres = validMergeRansacThres

            if ransacStructureThresK > 0:
                self.ransacStructureThres = mergeSfM.findMedianStructurePointsThres(
                    sfm_data, ransacStructureThresK)
            else:
                self.ransacStructureThres = ransacStructureThres

            if mergeStructureThresK > 0:
                self.mergeStructureThres = mergeSfM.findMedianStructurePointsThres(
                    sfm_data, mergeStructureThresK)
            else:
                self.mergeStructureThres = mergeStructureThres
def main():
    """CLI entry point: convert a Numpy Mat text file to an OpenCV YAML Mat."""
    description = 'This script is for converting Numpy Mat txt file to OpenCV mat YAML file.'
    parser = argparse.ArgumentParser(description=description)
    # three required positional arguments, all plain strings
    for argName, helpText in [
            ('input_file', 'Input file path of Mat text data saved by Python Numpy.'),
            ('output_file', 'Ouput file path of YAML Mat data for C++ OpenCV.'),
            ('output_name', 'Mat name stored in output YAML file.')]:
        parser.add_argument(argName, action='store', nargs=None, const=None,
                            default=None, type=str, choices=None, metavar=None,
                            help=helpText)
    args = parser.parse_args()

    FileUtils.convertNumpyMatTxt2OpenCvMatYml(args.input_file, args.output_file,
                                              args.output_name)
def main():
    """CLI entry point: convert a Numpy Mat text file to an OpenCV YAML Mat."""
    parser = argparse.ArgumentParser(
        description="This script is for converting Numpy Mat txt file to OpenCV mat YAML file."
    )
    # the three positionals share the same (mostly default) settings
    sharedOpts = dict(action="store", nargs=None, const=None, default=None,
                      type=str, choices=None, metavar=None)
    parser.add_argument("input_file",
                        help="Input file path of Mat text data saved by Python Numpy.",
                        **sharedOpts)
    parser.add_argument("output_file",
                        help="Ouput file path of YAML Mat data for C++ OpenCV.",
                        **sharedOpts)
    parser.add_argument("output_name",
                        help="Mat name stored in output YAML file.",
                        **sharedOpts)
    args = parser.parse_args()

    FileUtils.convertNumpyMatTxt2OpenCvMatYml(args.input_file, args.output_file,
                                              args.output_name)
def copyOriginalFiles(inputDir, outputDir):
    """Copy input images, csv files, and feature/descriptor files from the
    per-video project folders under inputDir into outputDir's merged layout."""
    def ensureDir(path):
        # create the directory only when it does not exist yet
        if not os.path.isdir(path):
            FileUtils.makedir(path)

    def copyInto(srcGlob, destDir):
        # --remove-destination lets the copy overwrite existing entries
        os.system("cp --remove-destination " + FILE_COPY_OPTION + " " + srcGlob + " " + destDir)

    ensureDir(os.path.join(outputDir, "Input"))
    ensureDir(os.path.join(outputDir, "Input", "inputImg"))
    ensureDir(os.path.join(outputDir, "Input", "csv"))
    ensureDir(os.path.join(outputDir, "Output", "matches"))

    copyInto(os.path.join(inputDir, "Input", "*", "inputImg", "*"), os.path.join(outputDir, "Input", "inputImg"))
    copyInto(os.path.join(inputDir, "Input", "*", "csv", "*"), os.path.join(outputDir, "Input", "csv"))
    copyInto(os.path.join(inputDir, "Output", "*", "matches", "*.desc"), os.path.join(outputDir, "Output", "matches"))
    copyInto(os.path.join(inputDir, "Output", "*", "matches", "*.feat"), os.path.join(outputDir, "Output", "matches"))
    copyInto(os.path.join(inputDir, "Output", "final", "Output", "matches", "image_describer.txt"), os.path.join(outputDir, "Output", "matches"))
 def __init__(self, name, imgFolLoc, csvFolLoc, matchesFolLoc, locFolLoc, sfm_dataLoc):
     """Record the folder layout of one SfM project and, when sfm_dataLoc
     is non-empty, the IDs of the views that were reconstructed."""
     self.name = name  # folder name
     self.mergeOrder = name  # tree-like structure recording merge order
     self.imgFolLoc = imgFolLoc  # input image folder
     self.csvFolLoc = csvFolLoc  # csv folder
     self.matchesFolLoc = matchesFolLoc  # matches folder with descriptors
     self.locFolLoc = locFolLoc  # localization result folder
     self.sfm_dataLoc = sfm_dataLoc  # path of sfm_data.json

     # a view counts as reconstructed when its pose has extrinsics
     if self.sfm_dataLoc != "":
         sfm_data = FileUtils.loadjson(self.sfm_dataLoc)
         posedKeys = [extrinsic["key"] for extrinsic in sfm_data["extrinsics"]]
         self.reconFrame = [
             view["value"]["ptr_wrapper"]["data"]["id_view"]
             for view in sfm_data["views"]
             if view["value"]["ptr_wrapper"]["data"]["id_pose"] in posedKeys]
# ---- Example #13 (score: 0) ----
def readMatch(locFolder):

    imgname = []
    matchlist = []

    print "Reading loc output: " + locFolder
    for filename in sorted(os.listdir(locFolder)):

        if filename[-4:] != "json":
            continue

        jsondata = FileUtils.loadjson(os.path.join(locFolder, filename))
        imgname.append(os.path.basename(jsondata["filename"]))
        matchlist.append(jsondata["pair"])

    return imgname, matchlist
# ---- Example #14 (score: 0) ----
def readMatch(locFolder):
    
    imgname = []
    matchlist = []
    
    print "Reading loc output: " + locFolder
    for filename in sorted(os.listdir(locFolder)):
        
        if filename[-4:] != "json":
            continue
        
        jsondata = FileUtils.loadjson(os.path.join(locFolder, filename))
        imgname.append(os.path.basename(jsondata["filename"]))
        matchlist.append(jsondata["pair"])
        
    return imgname, matchlist
    def __init__(self, name, imgFolLoc, csvFolLoc, matchesFolLoc, locFolLoc,
                 sfm_dataLoc):
        """Record the folder layout of one SfM project and, when sfm_dataLoc
        is non-empty, the IDs of the views that were reconstructed."""
        self.name = name  # folder name
        self.mergeOrder = name  # tree-like structure recording merge order
        self.imgFolLoc = imgFolLoc  # input image folder
        self.csvFolLoc = csvFolLoc  # csv folder
        self.matchesFolLoc = matchesFolLoc  # matches folder with descriptors
        self.locFolLoc = locFolLoc  # localization result folder
        self.sfm_dataLoc = sfm_dataLoc  # path of sfm_data.json

        # a view counts as reconstructed when its pose has extrinsics
        if self.sfm_dataLoc != "":
            sfm_data = FileUtils.loadjson(self.sfm_dataLoc)
            posedKeys = [extrinsic["key"] for extrinsic in sfm_data["extrinsics"]]
            self.reconFrame = [
                view["value"]["ptr_wrapper"]["data"]["id_view"]
                for view in sfm_data["views"]
                if view["value"]["ptr_wrapper"]["data"]["id_pose"] in posedKeys]
def main():
    """Merge multiple OpenMVG SfM models, listed in a CSV file, into one model.

    Each CSV row gives: [0] project directory, [1] sfm_data.json path, and
    [2] path of a matrix A mapping that model into the global coordinate
    frame. The merged model, BoW/PCA training output, and a colorized
    point cloud are written under output_dir.
    """
    description = 'This script is for merging multiple SfM output models to one SfM model.' + \
                'Please prepare multiple OpenMVG projects which have output SfM models, and matrix to convert to global coordinate.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which lists OpenMVG projects which will be merged.')
    parser.add_argument('output_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Output directory path where merged model will be saved.')
    args = parser.parse_args()
    input_csv = args.input_csv
    output_dir = args.output_dir

    # load reconstruct parameters
    reconstructParam = ReconstructParam.ReconstructParam

    # read projects list
    # each row: project dir, sfm_data.json path, global-transform matrix file
    projectList = []
    with open(input_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            project = {}
            project["dir"] = row[0]
            project["sfm_data"] = row[1]
            project["A"] = row[2]
            projectList.append(project)

    # copy source files to output directory
    for project in projectList:
        copyOriginalFiles(project["dir"], output_dir)

    # prepare output directory
    if not os.path.isdir(os.path.join(output_dir, "Ref")):
        FileUtils.makedir(os.path.join(output_dir, "Ref"))
    if not os.path.isdir(os.path.join(output_dir, "Ref", "loc")):
        FileUtils.makedir(os.path.join(output_dir, "Ref", "loc"))
    if not os.path.isdir(os.path.join(output_dir, "Output", "SfM")):
        FileUtils.makedir(os.path.join(output_dir, "Output", "SfM"))
    if not os.path.isdir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction")):
        FileUtils.makedir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction"))
    if not os.path.isdir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction",
                         "global")):
        FileUtils.makedir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction",
                         "global"))

    # load each project's sfm_data.json; when a beacon.txt exists it must be
    # identical across all merged projects
    sfmDataList = []
    sfmViewBeaconDataList = []
    sfmBeaconMap = None
    for project in projectList:
        if not os.path.exists(project["sfm_data"]):
            print "cannot find sfm data : " + project["sfm_data"]
            sys.exit()
        with open(project["sfm_data"]) as jsonFile:
            sfmDataList.append(json.load(jsonFile))

        sfmBeaconFile = os.path.join(os.path.dirname(project["sfm_data"]),
                                     "beacon.txt")
        if os.path.exists(sfmBeaconFile):
            print "find beacon.txt for sfm data : " + project["sfm_data"]
            imgBeaconList, beaconMap = iBeaconUtils.readBeaconData(
                sfmBeaconFile)
            sfmViewBeaconDataList.append(imgBeaconList)
            if sfmBeaconMap is None:
                sfmBeaconMap = beaconMap
            else:
                if sfmBeaconMap != beaconMap:
                    print "invalid find beacon.txt for sfm data : " + project[
                        "sfm_data"]
                    print "beacon.txt should be same for all merged sfm_data"
                    sys.exit()
                else:
                    print "valid beacon.txt for sfm data : " + project[
                        "sfm_data"]

    # load the global-coordinate transform matrix of each project
    AList = []
    for project in projectList:
        AList.append(np.loadtxt(project["A"]))
        print "load mat : " + project["A"]
        print(np.loadtxt(project["A"]))

    print "Load 3D points"
    pointIdList = []
    pointList = []
    for sfmData in sfmDataList:
        pointId, point = mergeSfM.getAll3DPointloc(sfmData)
        # NOTE(review): np.float is removed in NumPy >= 1.24; works on the
        # NumPy version this (Python 2) codebase targets
        pointn = np.asarray(point, dtype=np.float).T

        pointIdList.append(pointId)
        pointList.append(pointn)

    # merge models
    # the first model seeds the merge; each later model is aligned via its A
    # matrix and merged where 3D points agree within a RANSAC threshold
    mergeSfmData = None
    mergePointId = None
    mergePointn = None
    mergeSfmViewBeaconData = None
    for idx in range(0, len(sfmDataList)):
        if idx == 0:
            mergeSfmData = sfmDataList[0]
            mergeSfM.transform_sfm_data(mergeSfmData, AList[0])
            if len(sfmViewBeaconDataList) > 0:
                mergeSfmViewBeaconData = sfmViewBeaconDataList[0]
        else:
            ransacThres = mergeSfM.findMedianStructurePointsThres(
                mergeSfmData, reconstructParam.ransacStructureThresMul)
            print "thres to merge 3D points : " + str(ransacThres)

            inlierMap = findInliersByKnownTransform(mergePointId,
                                                    pointIdList[idx],
                                                    mergePointn,
                                                    pointList[idx], AList[idx],
                                                    ransacThres)
            print "number of points in base model : " + str(len(
                mergePointn[0]))
            print "number of points in model " + str(idx) + " : " + str(
                len(pointList[idx]))
            print "number of inliers : " + str(len(inlierMap))
            if len(sfmViewBeaconDataList) > 0:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx],
                                        AList[idx],
                                        {x[0]: x[1]
                                         for x in inlierMap},
                                        mergeSfmViewBeaconData,
                                        sfmViewBeaconDataList[idx])
            else:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx],
                                        AList[idx],
                                        {x[0]: x[1]
                                         for x in inlierMap})

        # refresh the merged point cloud for the next iteration's matching
        mergePointId, mergePoint = mergeSfM.getAll3DPointloc(mergeSfmData)
        mergePointn = np.asarray(mergePoint, dtype=np.float).T

    # go back to coordinate of the first model
    _invA = np.linalg.inv(AList[0][0:3, 0:3])
    invA = np.c_[_invA, -np.dot(_invA, AList[0][:, 3])]
    mergeSfM.transform_sfm_data(mergeSfmData, invA)

    mergeSfmData["root_path"] = os.path.join(output_dir, "Input", "inputImg")

    resultSfMDataFile = os.path.join(output_dir, "Output", "SfM",
                                     "reconstruction", "global",
                                     "sfm_data.json")

    with open(os.path.join(resultSfMDataFile), "w") as jsonfile:
        json.dump(mergeSfmData, jsonfile)

    # export merged beacon data alongside the merged model, if any was loaded
    if mergeSfmViewBeaconData is not None:
        mergeSfmViewBeaconDataMapList = []
        for key in mergeSfmViewBeaconData:
            mergeSfmViewBeaconDataMap = {}
            mergeSfmViewBeaconDataMap[key] = mergeSfmViewBeaconData[key]
            mergeSfmViewBeaconDataMapList.append(mergeSfmViewBeaconDataMap)
        iBeaconUtils.exportBeaconData(
            len(mergeSfmData["views"]), sfmBeaconMap,
            mergeSfmViewBeaconDataMapList,
            os.path.join(os.path.dirname(resultSfMDataFile), "beacon.txt"))
    # the bundle adjustment call below is disabled (string literal, not executed)
    '''
    os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
    '''

    # save the first model's global transform in both Numpy and OpenCV formats
    Amat = AList[0]
    with open(os.path.join(output_dir, "Ref", "Amat.txt"), "w") as AmatFile:
        np.savetxt(AmatFile, Amat)
    FileUtils.convertNumpyMatTxt2OpenCvMatYml(
        os.path.join(output_dir, "Ref", "Amat.txt"),
        os.path.join(output_dir, "Ref", "Amat.yml"), "A")

    # To create same directory structure before merging, create sfm_data.json without structure information in matches directory
    with open(resultSfMDataFile) as fpr:
        sfmData = json.load(fpr)
        sfmData["extrinsics"] = []
        sfmData["control_points"] = []
        sfmData["structure"] = []
        with open(
                os.path.join(output_dir, "Output", "matches", "sfm_data.json"),
                "w") as fpw:
            json.dump(sfmData, fpw)

    # train BoW vocabulary and PCA on the merged model
    print "Execute : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml")
    os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml"))

    # export a colorized point cloud of the merged model
    os.system("openMVG_main_ComputeSfM_DataColor -i " + resultSfMDataFile + \
              " -o " + os.path.join(output_dir,"Output","SfM","reconstruction","global","colorized.ply"))
def saveFinalSfM(projectDir):
    """Assemble the final SfM output under projectDir/Output/final.

    Selects the reconstruction with the most camera poses (preferring the
    merged models under Output/merge_result, falling back to single-video
    models), then links/copies input images, csv files, feature files,
    BoW/PCA files, and the selected sfm_data.json / colorized.ply into the
    final directory layout.
    """
    # prepare output directory
    finalOutputDir = os.path.join(projectDir, "Output", "final")
    if not os.path.isdir(finalOutputDir):
        FileUtils.makedir(finalOutputDir)
    if not os.path.isdir(os.path.join(finalOutputDir, "Input")):
        FileUtils.makedir(os.path.join(finalOutputDir, "Input"))
    if not os.path.isdir(os.path.join(finalOutputDir, "Input", "inputImg")):
        FileUtils.makedir(os.path.join(finalOutputDir, "Input", "inputImg"))
    if not os.path.isdir(os.path.join(finalOutputDir, "Input", "csv")):
        FileUtils.makedir(os.path.join(finalOutputDir, "Input", "csv"))
    if not os.path.isdir(os.path.join(finalOutputDir, "Output")):
        FileUtils.makedir(os.path.join(finalOutputDir, "Output"))
    if not os.path.isdir(os.path.join(finalOutputDir, "Output", "matches")):
        FileUtils.makedir(os.path.join(finalOutputDir, "Output", "matches"))
    if not os.path.isdir(os.path.join(finalOutputDir, "Output", "SfM")):
        FileUtils.makedir(os.path.join(finalOutputDir, "Output", "SfM"))
    if not os.path.isdir(
            os.path.join(finalOutputDir, "Output", "SfM", "reconstruction")):
        FileUtils.makedir(
            os.path.join(finalOutputDir, "Output", "SfM", "reconstruction"))
    if not os.path.isdir(
            os.path.join(finalOutputDir, "Output", "SfM", "reconstruction",
                         "global")):
        FileUtils.makedir(
            os.path.join(finalOutputDir, "Output", "SfM", "reconstruction",
                         "global"))

    # choose the reconstruction with the largest number of poses (extrinsics)
    maxPoseNum = -1
    selectedSfmOutputDir = ''
    # select largest model from "Output/merge_result" at first
    sfmOutputDirs = sorted(
        os.listdir(
            os.path.join(projectDir, "Output", "merge_result", "Output", "SfM",
                         "reconstruction")))
    for sfmOutputDir in sfmOutputDirs:
        sfmDataFile = os.path.join(projectDir, "Output", "merge_result",
                                   "Output", "SfM", "reconstruction",
                                   sfmOutputDir, "sfm_data.json")
        if not os.path.exists(sfmDataFile):
            continue
        with open(sfmDataFile) as fp:
            sfmData = json.load(fp)
            poseNum = len(sfmData["extrinsics"])
            if (poseNum > maxPoseNum):
                selectedSfmOutputDir = os.path.join(projectDir, "Output",
                                                    "merge_result", "Output",
                                                    "SfM", "reconstruction",
                                                    sfmOutputDir)
                maxPoseNum = poseNum
    # select from single 3D model if merged 3D model does not exist
    if not selectedSfmOutputDir:
        outputDirs = sorted(os.listdir(os.path.join(projectDir, "Output")))
        for outputDir in outputDirs:
            outputDirPath = os.path.join(projectDir, "Output", outputDir)
            if not os.path.isdir(outputDirPath):
                continue
            sfmOutputDir = os.path.join(outputDirPath, "SfM", "reconstruction",
                                        "global")
            sfmDataFile = os.path.join(sfmOutputDir, "sfm_data.json")
            if not os.path.exists(sfmDataFile):
                continue
            with open(sfmDataFile) as fp:
                sfmData = json.load(fp)
                poseNum = len(sfmData["extrinsics"])
                if (poseNum > maxPoseNum):
                    selectedSfmOutputDir = sfmOutputDir
                    maxPoseNum = poseNum

    # create symbolic links to all images, csv, and descriptor/feature files
    # (cp -s makes symlinks instead of copying file contents)
    os.system("cp --remove-destination -s " +
              os.path.join(projectDir, "Input", "*", "inputImg", "*") + " " +
              os.path.join(finalOutputDir, "Input", "inputImg"))
    os.system("cp --remove-destination -s " +
              os.path.join(projectDir, "Input", "*", "csv", "*") + " " +
              os.path.join(finalOutputDir, "Input", "csv"))
    os.system("cp --remove-destination -s " +
              os.path.join(projectDir, "Output", "*", "matches", "*.desc") +
              " " + os.path.join(finalOutputDir, "Output", "matches"))
    os.system("cp --remove-destination -s " +
              os.path.join(projectDir, "Output", "*", "matches", "*.feat") +
              " " + os.path.join(finalOutputDir, "Output", "matches"))
    os.system("cp --remove-destination -s " +
              os.path.join(projectDir, "Output", "*", "matches", "*.bow") +
              " " + os.path.join(finalOutputDir, "Output", "matches"))

    # copy image_describer.txt
    # taken from the first video's matches folder; presumably identical
    # across videos — TODO confirm
    listVideo = sorted(os.listdir(os.path.join(projectDir, "Input")))
    os.system("cp --remove-destination " + os.path.join(
        projectDir, "Output", listVideo[0], "matches", "image_describer.txt") +
              " " + os.path.join(finalOutputDir, "Output", "matches"))

    # copy listbeacon.txt
    os.system("cp --remove-destination " +
              os.path.join(projectDir, "Input", "listbeacon.txt") + " " +
              os.path.join(finalOutputDir, "Input"))

    # copy SfM result
    os.system("cp --remove-destination -s " +
              os.path.join(selectedSfmOutputDir, "sfm_data.json") + " " +
              os.path.join(finalOutputDir, "Output", "SfM", "reconstruction",
                           "global"))
    os.system("cp --remove-destination -s " +
              os.path.join(selectedSfmOutputDir, "colorized.ply") + " " +
              os.path.join(finalOutputDir, "Output", "SfM", "reconstruction",
                           "global"))

    # copy PCAfile.yml and BOWfile.yml if exists
    if os.path.exists(
            os.path.join(projectDir, "Output", "merge_result", "Output",
                         "matches", "PCAfile.yml")):
        os.system("cp --remove-destination " +
                  os.path.join(projectDir, "Output", "merge_result", "Output",
                               "matches", "PCAfile.yml") + " " +
                  os.path.join(finalOutputDir, "Output", "matches"))
    if os.path.exists(
            os.path.join(projectDir, "Output", "merge_result", "Output",
                         "matches", "BOWfile.yml")):
        os.system("cp --remove-destination " +
                  os.path.join(projectDir, "Output", "merge_result", "Output",
                               "matches", "BOWfile.yml") + " " +
                  os.path.join(finalOutputDir, "Output", "matches"))

    # To create same directory structure before merging, create sfm_data.json without structure information in matches directory
    with open(os.path.join(os.path.join(selectedSfmOutputDir,
                                        "sfm_data.json"))) as fpr:
        sfmData = json.load(fpr)
        sfmData["extrinsics"] = []
        sfmData["control_points"] = []
        sfmData["structure"] = []
        with open(
                os.path.join(finalOutputDir, "Output", "matches",
                             "sfm_data.json"), "w") as fpw:
            json.dump(sfmData, fpw)

    # copy beacon.txt if exists
    if os.path.exists(os.path.join(selectedSfmOutputDir, "beacon.txt")):
        os.system("cp --remove-destination " +
                  os.path.join(selectedSfmOutputDir, "beacon.txt") + " " +
                  os.path.join(finalOutputDir, "Output", "SfM",
                               "reconstruction", "global"))
def main():
    mat = FileUtils.loadBinMat(os.path.join("../data","opencv-mat.bin"))
    print "loaded binary matrix : " + str(mat)
def main():
    """Reconstruct a 3D model from every video under <project_path>/Input and
    merge the per-video models into one model under Output/merge_result.

    Pipeline per video: OpenMVG image listing -> intrinsic fix-up -> feature
    extraction/matching -> global SfM (retried) -> colorization -> cut of
    scale "jumps" in the reconstruction -> removal of unused frames (which
    are re-queued as new pseudo-videos) -> graph-based merge of all models
    (optionally BOW-accelerated) -> save of the largest merged model.

    Command line:
        project_path           project directory containing an Input folder
        -k/--path-camera-file  3x3 camera matrix in NumPy text format
        --bow                  use bag-of-words to accelerate model merging
    """
    # set default parameter
    reconstructParam = ReconstructParam.ReconstructParam
    reconstructBOWParam = ReconstructBOWParam.ReconstructBOWParam
    
    # parse parameters
    description = 'This script is for reconstruct 3D models from multiple videos and merge to one 3D model. ' + \
                'BOW is used for accelerating 3D model merge. ' + \
                'Please prepare multiple videos in Input folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_path', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where your 3D model project is stored.')
    parser.add_argument('-k', '--path-camera-file', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where camera matrix is stored in Numpy text format. (default: focal length ' + \
                            str(reconstructParam.focalLength) + ' will be used)')    
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate 3D model merge if this flag is set (default: False)')    
    args = parser.parse_args()
    PROJECT_PATH = args.project_path
    USE_BOW = args.bow
    PATH_CAMERA_FILE = args.path_camera_file
    
    # if a camera matrix file is supplied, validate it is 3x3 and take
    # its focal length (K[0][0]) for the reconstruction
    if PATH_CAMERA_FILE:
        if os.path.exists(PATH_CAMERA_FILE):
            with open(PATH_CAMERA_FILE,"r") as camMatFile:
                K = np.loadtxt(camMatFile)
            if K.shape[0]!=3 or K.shape[1]!=3:
                print "Error : invalid camera matrix size = " + str(K)
                sys.exit()
            print "Focal length " + str(K[0][0]) + " is set for reconstruction"
            reconstructParam.focalLength = K[0][0]
        else:
            print "Error : invalid camera matrix file = " + PATH_CAMERA_FILE
            sys.exit()
    
    # get paths
    inputPath = os.path.join(PROJECT_PATH, "Input")
    outputPath = os.path.join(PROJECT_PATH, "Output")
    
    FileUtils.makedir(outputPath)
    
    # reconstruct all videos
    # NOTE: listVideo grows while being iterated — unused frames detected in
    # step 6 below are re-queued as new "<video>_front"/"<video>_back" entries
    # so they get reconstructed in a later pass of this same loop.
    listVideo = sorted(os.listdir(inputPath))
    for video in listVideo:
        if not os.path.isdir(os.path.join(inputPath, video)):
            continue
        
        print "Begin reconstructing video : " + video
        
        # per-video directory layout
        sfm_mainDir = os.path.join(outputPath, video)
        sfm_inputDir = os.path.join(inputPath, video)
        sfm_inputImgDir = os.path.join(sfm_inputDir, "inputImg")
        sfm_matchesDir = os.path.join(sfm_mainDir, "matches")
        sfm_sfmDir = os.path.join(sfm_mainDir, "SfM")
        sfm_reconstructDir = os.path.join(sfm_sfmDir, "reconstruction")
        sfm_globalDir = os.path.join(sfm_reconstructDir, "global")
                    
        FileUtils.makedir(sfm_mainDir)
        FileUtils.makedir(sfm_inputImgDir)
        FileUtils.makedir(sfm_matchesDir)
        FileUtils.makedir(sfm_sfmDir)
        FileUtils.makedir(sfm_reconstructDir)
        FileUtils.makedir(sfm_globalDir)
        
        # skip videos that already have a reconstruction result
        if not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")):
            # 1 - list images into an OpenMVG sfm_data.json
            os.system("openMVG_main_SfMInit_ImageListing -i " + sfm_inputImgDir + " -o " + sfm_matchesDir + " -d " + reconstructParam.CAMERA_DATABASE_PATH)
            
            # 1.1 Check intrinsic
            # ( if camera parameter not specified then replace with fixed camera.
            # and set appropriate width and height)
            with open(os.path.join(sfm_matchesDir, "sfm_data.json")) as sfm_data_file:
                sfm_data = json.load(sfm_data_file)
                hImg = sfm_data["views"][0]['value']['ptr_wrapper']['data']["height"]
                wImg = sfm_data["views"][0]['value']['ptr_wrapper']['data']["width"]
                if len(sfm_data["intrinsics"]) == 0:
                    # point every view at the single shared intrinsic (key 0)
                    for view in sfm_data["views"]:
                        view["value"]["ptr_wrapper"]["data"]["id_intrinsic"] = 0;
                        
                    sfm_data["intrinsics"].append({})
                    sfm_data["intrinsics"][0]["key"] = 0
                    sfm_data["intrinsics"][0]["values"] = {}
                    # sfm_data["intrinsics"][0]["values"]["polymorphic_name"] = "pinhole_radial_k3"
                    sfm_data["intrinsics"][0]["values"]["polymorphic_name"] = "pinhole"
                    # NOTE(review): the polymorphic_id / ptr_wrapper id constants
                    # appear to be serialization ids expected by OpenMVG's cereal
                    # format — confirm against the OpenMVG version in use
                    sfm_data["intrinsics"][0]["values"]["polymorphic_id"] = 2147999999
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["id"] = 2147483660
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["width"] = wImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["height"] = hImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["focal_length"] = reconstructParam.focalLength
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["disto_k3"] = [0, 0, 0]
                    # principal point at the image center (Python 2 integer division)
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["principal_point"] = [wImg / 2, hImg / 2]
                    
            with open(os.path.join(sfm_matchesDir, "sfm_data.json"), "w") as sfm_data_file:
                json.dump(sfm_data, sfm_data_file)
                
            # 2 - Features computation and matching
            # ( Compute a list of features & descriptors for each image)
            os.system(reconstructParam.EXTRACT_FEATURE_MATCH_PROJECT_PATH + \
                      " " + sfm_matchesDir + \
                      " -mf=" + str(reconstructParam.maxTrackletMatchDistance) + \
                      " -mm=" + str(reconstructParam.minMatchToRetain) + \
                      " -f=" + str(reconstructParam.extFeatDistRatio) + \
                      " -r=" + str(reconstructParam.extFeatRansacRound))
            
            # OpenMVG assumes matches.e.txt for global reconstruction, matches.f.txt for incremental reconstruction
            os.system("cp " + os.path.join(sfm_matchesDir, "matches.f.txt") + " " + os.path.join(sfm_matchesDir, "matches.e.txt"))
            
            # 3 - Global reconstruction
            # retried up to rerunRecon times because GlobalSfM can fail
            # non-deterministically; success is detected by the output file
            countRecon = 1
            while not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")) and countRecon < reconstructParam.rerunRecon:  
                os.system("openMVG_main_GlobalSfM -i " + os.path.join(sfm_matchesDir, "sfm_data.json") + " -m " + sfm_matchesDir + " -o " + sfm_globalDir)
                countRecon = countRecon + 1
                time.sleep(1)
            
            # give up on this video if reconstruction never succeeded
            if not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")):
                continue
                
            # 4 - Color the pointcloud
            os.system("openMVG_main_ComputeSfM_DataColor -i " + os.path.join(sfm_globalDir, "sfm_data.json") + " -o " + os.path.join(sfm_globalDir, "colorized.ply"))
            
            # 4.5 remove part of reconstruction where it is incorrect
            # Specifically,sometimes when their matching is not adequate,
            # the reconstructed model will be divided into two or more models
            # with different scale and a "jump" between pose translation.
            # This function detects such jump and retain the the largest 
            # beginning or ending part of reconstruction, while the rest
            # should be reconstructed separately by cleanSfM.
            countCut = 0
            # keep cutting until no more cut; each cut keeps a numbered
            # backup (sfm_data_BC<i>.json) and re-runs bundle adjustment
            while cleanSfM.cutSfMDataJump(os.path.join(sfm_globalDir, "sfm_data.json"), bufferFrame=reconstructParam.bufferFrame):
                countCut = countCut + 1
                os.rename(os.path.join(sfm_globalDir, "sfm_data_BC.json"),
                          os.path.join(sfm_globalDir, "sfm_data_BC" + str(countCut) + ".json"))
                os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " -c=" + "rs,rst,rsti")
            os.system("openMVG_main_ComputeSfM_DataColor -i " + os.path.join(sfm_globalDir, "sfm_data.json") + " -o " + os.path.join(sfm_globalDir, "colorized_AC.ply"))
         
            # 5 - Clean sfm_data by removing viewID of frames that are not used
            # in reconstruction and put them in another folder and reconstruct them again
            # note that sfm_data.json in matches folder is renamed and kept as reference
            unusedImg = cleanSfM.cleanSfM(os.path.join(sfm_globalDir, "sfm_data.json"),
                                 [os.path.join(sfm_matchesDir, "matches.putative.txt"),
                                  os.path.join(sfm_matchesDir, "matches.e.txt"),
                                  os.path.join(sfm_matchesDir, "matches.f.txt")])
            
            # 6. move unused images, csv files into a new folder unless they have less than x images
            for i in range(0, len(unusedImg)):
                listUnused = unusedImg[i]
                if len(listUnused) < reconstructParam.minUnusedImgLength:
                    continue
                
                # set name for new video: index 0 is the unused leading part,
                # index 1 the unused trailing part
                if i == 0:
                    newVidName = video + "_front"
                elif i == 1:
                    newVidName = video + "_back"
                else:
                    # this should not be called
                    continue
                
                # set path
                pathNewVid = os.path.join(inputPath, newVidName)
                
                # skip if there is already this folder
                if os.path.isdir(pathNewVid):
                    continue
                
                print "Extract unused part of " + video + " into " + newVidName
                
                FileUtils.makedir(pathNewVid)
                
                csvNewVid = os.path.join(pathNewVid, "csv")
                imgNewVid = os.path.join(pathNewVid, "inputImg")
                FileUtils.makedir(csvNewVid)
                FileUtils.makedir(imgNewVid)
                
                # copy image in list and csv file (symlinks via cp -s)
                os.system("cp -s " + os.path.join(sfm_inputDir, "csv", "*.csv") + " " + csvNewVid)
                for unusedFilename in listUnused:
                    os.system("cp -s " + os.path.join(sfm_inputImgDir, unusedFilename) + " " + imgNewVid)
                
                # append the folder into reconstruction queue
                listVideo.append(newVidName)
    
    # train bag of words model, and extract bag of words feature for all images
    if USE_BOW and not os.path.isfile(os.path.join(outputPath, "merge_result", "Output", "matches", "BOWfile.yml")):
        outputBowPath = os.path.join(outputPath, "merge_result", "Output", "matches")
        if not os.path.isdir(outputBowPath):
            FileUtils.makedir(outputBowPath)
        print "Execute Training BOW : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml")
        os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml"))
    
    # load graph structure from "mergeGraph.txt" if it exists
    # create new graph structure if it does not exist
    if os.path.isfile(os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction", "mergeGraph.txt")):
        if USE_BOW:
            sfmGraph = sfmMergeGraphBOW.sfmGraphBOW.load(os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction", "mergeGraph.txt"))
        else:
            sfmGraph = sfmMergeGraph.sfmGraph.load(os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction", "mergeGraph.txt"))
        # workspace path is machine-specific, so always refresh it after load
        sfmGraph.workspacePath = reconstructParam.WORKSPACE_DIR
        
        #### start of manually adding new model code ####
        # In current code, you cannot add new 3D model once you start merging.
        # Enable following commented code to add new 3D model after you already started merging.
        '''
        newModelToAdd = []
        for newModelName in newModelToAdd:
            addModel(newModelName,os.path.join(inputPath,newModelName),os.path.join(outputPath,newModelName))
        sfmGraph.clearBadMatches()
        '''
        ### end of manually adding new model code ###
    else:
        if USE_BOW:
            sfmGraph = sfmMergeGraphBOW.sfmGraphBOW(inputPath,
                                                    outputPath,
                                                    os.path.join(outputPath, "merge_result", "Input"),
                                                    os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction"),
                                                    os.path.join(outputPath, "merge_result", "Output", "matches"),
                                                    os.path.join(outputPath, "merge_result", "Input", "csv"),
                                                    os.path.join(outputPath, "merge_result", "Input", "inputImg"),
                                                    reconstructParam.WORKSPACE_DIR,
                                                    reconstructParam.minReconFrameToAdd)
        else:
            sfmGraph = sfmMergeGraph.sfmGraph(inputPath,
                                              outputPath,
                                              os.path.join(outputPath, "merge_result", "Input"),
                                              os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction"),
                                              os.path.join(outputPath, "merge_result", "Output", "matches"),
                                              os.path.join(outputPath, "merge_result", "Input", "csv"),
                                              os.path.join(outputPath, "merge_result", "Input", "inputImg"),
                                              reconstructParam.WORKSPACE_DIR,
                                              reconstructParam.minReconFrameToAdd)
    
    # merge all reconstructed models; the image_describer of the first video
    # is reused so all models share the same feature configuration
    if USE_BOW:
        sfmGraph.mergeModel(os.path.join(outputPath, listVideo[0], "matches", "image_describer.txt"),
                            inputPath,
                            outputPath,
                            reconParam=reconstructParam,
                            reconBOWParam=reconstructBOWParam)
    else:
        sfmGraph.mergeModel(os.path.join(outputPath, listVideo[0], "matches", "image_describer.txt"),
                            inputPath,
                            outputPath,
                            reconParam=reconstructParam)
    
    # select largest 3D model and save it
    SfMDataUtils.saveFinalSfM(PROJECT_PATH)
    def mergeOneModel(self, model1, model2, reconParam):
        """Attempt to merge model2 into model1.

        Localizes model2's reconstructed frames against model1, estimates a
        transformation from the resulting 3D-3D matches (mergeSfM.mergeModel),
        validates the merge with frame-agreement ratios, and finally
        bundle-adjusts and colorizes the merged model.

        Args:
            model1: reference sfmModel the other model is merged into.
            model2: sfmModel whose frames are localized on model1.
            reconParam: reconstruction parameter object (paths + thresholds).

        Returns:
            (True, merged sfmModel) when the merge is accepted,
            (False, empty sfmModel) when the merge is rejected.
        """
        # output folder for this merge attempt
        sfmOutPath = os.path.join(self.mSfMPath,"global"+str(self.nMergedModel))
        
        # create a temporary folder for reconstructed image of model2
        inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmp"+model2.name)
                
        # copy (symlink) reconstructed images from model2 to the tmp folder
        sfm_data2 = FileUtils.loadjson(model2.sfm_dataLoc)
        if not os.path.isdir(inputImgTmpFolder):
            # keep only the views whose id_view was actually reconstructed
            listReconFrameName = [sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["filename"] for x in range(0,len(sfm_data2["views"])) if sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["id_view"] in model2.reconFrame]
            FileUtils.makedir(inputImgTmpFolder)
            for reconFrameName in listReconFrameName:
                os.system("cp -s " + os.path.join(model2.imgFolLoc,reconFrameName) + " " + inputImgTmpFolder)
        
        
        # remove all old localization result
        FileUtils.removedir(model2.locFolLoc) 
        FileUtils.makedir(model2.locFolLoc)

        # localize the images from model2 on model1
        os.system(reconParam.LOCALIZE_PROJECT_PATH + \
                  " " + inputImgTmpFolder + \
                  " " + os.path.dirname(model1.sfm_dataLoc) + \
                  " " + self.mMatchesPath + \
                  " " + model2.locFolLoc + \
                  " -f=" + str(reconParam.locFeatDistRatio) + \
                  " -r=" + str(reconParam.locRansacRound) + \
                  " -e=" + model2.csvFolLoc + \
                  " -i=" + str(reconParam.locSkipFrame))
                  
        # remove temporary image folder
        # removedir(inputImgTmpFolder)
        
        # extract camera centers from all localization json files and write
        # them to center.txt (one "x y z 255 0 0" red point per frame)
        fileLoc = open(os.path.join(model2.locFolLoc,"center.txt"),"w")
        countLocFrame = 0
        for filename in sorted(os.listdir(model2.locFolLoc)):
            if filename[-4:]!="json":
                continue
            
            countLocFrame = countLocFrame + 1
            with open(os.path.join(model2.locFolLoc,filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(str(loc[0]) + " "  + str(loc[1]) + " "  +str(loc[2]) + " 255 0 0\n" )   
        fileLoc.close() 
        
        # get inlier matches
        FileUtils.makedir(sfmOutPath)
        resultSfMDataFile = os.path.join(sfmOutPath,"sfm_data.json")
        # below also checks if the ratio between first and last svd of M[0:3,0:3] 
        # is good or not. If not then reject
        nInlierTmp, M = mergeSfM.mergeModel(model1.sfm_dataLoc,
                            model2.sfm_dataLoc,
                            model2.locFolLoc,
                            resultSfMDataFile,
                            ransacK=reconParam.ransacStructureThresMul,
                            ransacRound=reconParam.ransacRoundMul*len(model1.reconFrame),
                            inputImgDir=self.mInputImgPath,
                            minLimit=reconParam.min3DnInliers) 

        # 3. perform test whether merge is good
        sfm_merge_generated = True
        countFileAgree = 0
        countFileLoc = 1
        if os.path.isfile(resultSfMDataFile):
            os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
            countFileLoc, countFileAgree = mergeSfM.modelMergeCheckLocal(resultSfMDataFile, model2.locFolLoc, reconParam.vldMergeAgrFrameThresK)
        else:
            sfm_merge_generated = False
        
        # fraction of model2's reconstructed frames whose localization agrees
        ratioAgreeFrameReconFrame = 0.0
        if (len(model2.reconFrame)>0):
            ratioAgreeFrameReconFrame = float(countFileAgree)/len(model2.reconFrame)
        # fraction of localized frames whose localization agrees
        ratioAgreeFrameLocFrame = 0.0
        if (countFileLoc>0):
            ratioAgreeFrameLocFrame = float(countFileAgree)/countFileLoc
        
        # write log file
        with open(os.path.join(self.mSfMPath,"global"+str(self.nMergedModel),"log.txt"),"a") as filelog:
            filelog.write(("M1: " + model1.name + "\n" + \
                          "M2: " + model2.name + "\n" + \
                          "nInliers: " + str(nInlierTmp) + "\n" + \
                          "countLocFrame: " + str(countLocFrame) + "\n" + \
                          "nReconFrame M2: " + str(len(model2.reconFrame)) + "\n" + \
                          "countFileAgree: " + str(countFileAgree) + "\n" + \
                          "countFileLoc: " + str(countFileLoc) + "\n" + \
                          "not sfm_merge_generated: " + str(not sfm_merge_generated) + "\n" + \
                          # obsolete condition by T. Ishihara 2015.11.10
                          #"nInlierTmp > "+str(reconParam.vldMergeRatioInliersFileagree)+"*countFileAgree: " + str(nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree) + "\n" + \
                          "countFileAgree > "+str(reconParam.vldMergeMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileAgree > "+str(reconParam.vldMergeSmallMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeSmallMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02                          
                          #"countFileLoc < countFileAgree*" +str(reconParam.vldMergeShortRatio)+ ": " + str(countFileLoc < countFileAgree*reconParam.vldMergeShortRatio) + "\n" + \
                          "ratioLocAgreeWithReconFrame: " + str(ratioAgreeFrameReconFrame) + "\n" + \
                          "ratioLocAgreeWithReconFrame > " + str(reconParam.vldMergeRatioAgrFReconF) + ": " + str(ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) + "\n" + \
                          "ratioLocAgreeWithLocFrame: " + str(ratioAgreeFrameLocFrame) + "\n" + \
                          "ratioLocAgreeWithLocFrame > " + str(reconParam.vldMergeRatioAgrFLocF) + ": " + str(ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF) + "\n" + \
                          str(M) + "\n\n"))
       
        # rename the localization folder to save localization result
        if os.path.isdir(model2.locFolLoc+model1.name):
            FileUtils.removedir(model2.locFolLoc+model1.name)
        os.rename(model2.locFolLoc,model2.locFolLoc+model1.name)

        # obsolete merge condition
        '''
        if not sfm_merge_generated or \
            not (nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree and \
            ((countFileAgree > reconParam.vldMergeMinCountFileAgree or (countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and countFileLoc < countFileAgree*reconParam.vldMergeShortRatio)) and \
            ((nInlierTmp > reconParam.vldMergeNInliers and float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconFNInliers) or float(countFileAgree)/countFileLoc > reconParam.vldMergeRatioAgrFLocF) and
            (float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconF))):
        '''
        # update merge condition by T. Ishihara 2015.11.10
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and \
                 countFileLoc < countFileAgree*reconParam.vldMergeShortRatio and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.04.02
        # reject the merge unless enough frames agree both in absolute count
        # and relative to model2's reconstructed / localized frames
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
            print "Transformed locations do not agree with localization. Skip merge between " + model1.name + " and " + model2.name + "."
                    
            # keep the rejected result for later inspection
            if os.path.isfile(os.path.join(sfmOutPath,"sfm_data.json")):
                os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                          os.path.join(sfmOutPath,"sfm_data_("+model1.name + "," + model2.name+").json"))
                               
            # move to next video
            return False, sfmModel("","","","","","")
                
        # generate colorized before bundle adjustment for comparison
        os.system("openMVG_main_ComputeSfM_DataColor " +
            " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
            " -o " + os.path.join(sfmOutPath,"colorized_pre.ply"))        
                
        # perform bundle adjustment
        # modified by T.Ishihara 2016.04.08
        # fix only translation at first
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rs,rst,rsti" + " -r=" + "1")
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "st,rst,rsti" + " -r=" + "1")
        
        os.system("openMVG_main_ComputeSfM_DataColor " +
            " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
            " -o " + os.path.join(sfmOutPath,"colorized.ply"))
        
        # merged model name is "A<model1>,<model2>Z"
        return True, sfmModel("A" + model1.name + "," + model2.name +"Z", self.mInputImgPath, self.mCsvPath, self.mMatchesPath, os.path.join(sfmOutPath,"loc"), resultSfMDataFile)
Example #21
0
def mergeModel(sfm_data_dirA, sfm_data_dirB, locFolderB, outfile, ransacThres, mergePointThres, ransacRoundMul=100, inputImgDir="", minLimit=4, svdRatio=1.75):
    
    print "Loading sfm_data"
    sfm_dataB = FileUtils.loadjson(sfm_data_dirB)
    
    # read matching pairs from localization result
    imgnameB, matchlistB = readMatch(locFolderB)
    
    # get viewID from image name for model B
    viewIDB = imgnameToViewID(imgnameB, sfm_dataB)

    # get mapping between viewID,featID to 3D point ID
    viewFeatMapB = getViewFeatTo3DMap(sfm_dataB)

    # find consistent match between 3D of model B to 3D of model A
    print "Calculating consistent 3D matches"
    match3D_BA = getConsistent3DMatch(viewIDB, matchlistB, viewFeatMapB)
    print "Found " + str(len(match3D_BA)) + " consistent matches"
    
    # not enough matches
    if len(match3D_BA) <= 4 or len(match3D_BA) <= minLimit:
        return len(match3D_BA), len(match3D_BA), np.asarray([])
 
    # move the load of larger model here to reduce time if merging is not possible
    sfm_dataA = FileUtils.loadjson(sfm_data_dirA)
 
    # get 3D point. Note that element 0 of each pair in match3D_BA
    # is 3D pt ID of model B and element 1 is that of model A
    print "Load 3D points"
    pointA = get3DPointloc(sfm_dataA, [x[1] for x in match3D_BA])
    pointB = get3DPointloc(sfm_dataB, [x[0] for x in match3D_BA])
    
    pointAn = np.asarray(pointA, dtype=np.float).T
    pointBn = np.asarray(pointB, dtype=np.float).T
        
    # find robust transformation
    print "Find transformation with RANSAC"
    ransacRound = len(match3D_BA)*ransacRoundMul
    print "Number of RANSAC round : " + str(ransacRound)
    M, inliers = ransacTransform(pointAn, pointBn, ransacThres, ransacRound, svdRatio)
    
    # cannot find RANSAC transformation
    if (M.size==0):
        return len(match3D_BA), len(match3D_BA), np.asarray([])
    print M
    
    # stop if not enough inliers
    sSvd = np.linalg.svd(M[0:3,0:3],compute_uv=0)
    # fixed by T.Ishihara to use minLimit 2016.06.06
    #if len(inliers) <= 4 or sSvd[0]/sSvd[-1] > svdRatio:
    if len(inliers) <= minLimit or sSvd[0]/sSvd[-1] > svdRatio:
        return len(match3D_BA), len(inliers), M
        
    # perform merge 
    # last argument is map from inliers 3D pt Id of model B to that of model A
    print "Merging sfm_data"
    # fixed by T. Ishihara, use different parameter to find ransac inlier and merge points inliers
    '''
    merge_sfm_data(sfm_dataA, sfm_dataB, M, {match3D_BA[x][0]: match3D_BA[x][1] for x in inliers})
    '''
    mergePointInliers = getInliersByAffineTransform(pointAn, pointBn, M, mergePointThres)
    merge_sfm_data(sfm_dataA, sfm_dataB, M, {match3D_BA[x][0]: match3D_BA[x][1] for x in mergePointInliers})
    
    # change input image folder
    if inputImgDir != "":
        sfm_dataA["root_path"] = inputImgDir
    
    # save json file
    print "Saving json file"
    FileUtils.savejson(sfm_dataA,outfile)
    
    # return number of inliers for transformation
    return len(match3D_BA), len(inliers), M
def main():
    """Merge multiple OpenMVG SfM models into one model in a global frame.

    Reads a CSV where each row lists an OpenMVG project directory, its
    sfm_data.json path, and a matrix file (loaded via np.loadtxt) that maps
    that model into the shared global coordinate system.  Each model is
    transformed to global coordinates, merged point-by-point into the first
    model, transformed back to the first model's frame, bundle-adjusted,
    used to train a BOW model, and finally colorized.  Exits the process on
    missing/inconsistent input.
    """
    description = 'This script is for merging multiple SfM output models to one SfM model.' + \
                'Please prepare multiple OpenMVG projects which have output SfM models, and matrix to convert to global coordinate.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which lists OpenMVG projects which will be merged.')
    parser.add_argument('output_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Output directory path where merged model will be saved.')
    args = parser.parse_args()
    input_csv = args.input_csv
    output_dir = args.output_dir
        
    # load reconstruct parameters
    # NOTE(review): ReconstructParam.ReconstructParam is referenced without
    # instantiation, so it is presumably a class/namespace of constants — confirm.
    reconstructParam = ReconstructParam.ReconstructParam
    
    # read projects list; each CSV row is:
    #   row[0] = project directory, row[1] = sfm_data.json path,
    #   row[2] = path of matrix file converting the model to global coordinates
    projectList = []
    with open(input_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            project = {}
            project["dir"]  = row[0]
            project["sfm_data"]  = row[1]
            project["A"] = row[2]
            projectList.append(project)
    
    # copy source files to output directory
    for project in projectList:
        copyOriginalFiles(project["dir"], output_dir)
    
    # prepare output directory
    if not os.path.isdir(os.path.join(output_dir,"Ref")):
        FileUtils.makedir(os.path.join(output_dir,"Ref"))
    if not os.path.isdir(os.path.join(output_dir,"Ref","loc")):
        FileUtils.makedir(os.path.join(output_dir,"Ref","loc"))
    if not os.path.isdir(os.path.join(output_dir,"Output","SfM")):
        FileUtils.makedir(os.path.join(output_dir,"Output","SfM"))
    if not os.path.isdir(os.path.join(output_dir,"Output","SfM","reconstruction")):
        FileUtils.makedir(os.path.join(output_dir,"Output","SfM","reconstruction"))
    if not os.path.isdir(os.path.join(output_dir,"Output","SfM","reconstruction","global")):
        FileUtils.makedir(os.path.join(output_dir,"Output","SfM","reconstruction","global"))
    
    # load each project's sfm_data.json, and its beacon.txt if present;
    # all projects that have beacon data must share the identical beacon map
    sfmDataList = []
    sfmViewBeaconDataList = []
    sfmBeaconMap = None
    for project in projectList:
        if not os.path.exists(project["sfm_data"]):
            print "cannot find sfm data : " + project["sfm_data"]
            sys.exit()
        with open(project["sfm_data"]) as jsonFile:
            sfmDataList.append(json.load(jsonFile))
        
        sfmBeaconFile = os.path.join(os.path.dirname(project["sfm_data"]), "beacon.txt")
        if os.path.exists(sfmBeaconFile):
            print "find beacon.txt for sfm data : " + project["sfm_data"]
            imgBeaconList, beaconMap = iBeaconUtils.readBeaconData(sfmBeaconFile)
            sfmViewBeaconDataList.append(imgBeaconList)
            if sfmBeaconMap is None:
                sfmBeaconMap = beaconMap
            else:
                if sfmBeaconMap!=beaconMap:
                    print "invalid find beacon.txt for sfm data : " + project["sfm_data"]
                    print "beacon.txt should be same for all merged sfm_data"
                    sys.exit()
                else:
                    print "valid beacon.txt for sfm data : " + project["sfm_data"]
    
    # load the global-coordinate transform matrix of each project
    AList = []
    for project in projectList:
        AList.append(np.loadtxt(project["A"]))
        print "load mat : " + project["A"]
        print (np.loadtxt(project["A"]))
    
    print "Load 3D points"
    pointIdList = []
    pointList = []
    for sfmData in sfmDataList:
        pointId, point = mergeSfM.getAll3DPointloc(sfmData)
        # 3 x N array of point coordinates
        # NOTE(review): np.float was removed in NumPy >= 1.24; this code
        # targets the older NumPy it was written against.
        pointn = np.asarray(point, dtype=np.float).T
        
        pointIdList.append(pointId)
        pointList.append(pointn)
    
    # merge models: model 0 (transformed to global) is the base; every other
    # model is merged into it using its known transform AList[idx]
    mergeSfmData = None
    mergePointId = None
    mergePointn = None
    mergeSfmViewBeaconData = None
    for idx in range(0, len(sfmDataList)):
        if idx==0:
            mergeSfmData = sfmDataList[0]
            mergeSfM.transform_sfm_data(mergeSfmData, AList[0])
            if len(sfmViewBeaconDataList)>0:
                mergeSfmViewBeaconData = sfmViewBeaconDataList[0]
        else:
            # threshold for unifying 3D points, derived from the median
            # structure-point spacing of the current merged model
            mergePointThres = mergeSfM.findMedianStructurePointsThres(mergeSfmData, reconstructParam.mergePointThresMul)
            print "thres to merge 3D points : " + str(mergePointThres)
            
            inlierMap = findInliersByKnownTransform(mergePointId, pointIdList[idx], mergePointn, pointList[idx], AList[idx], mergePointThres)
            print "number of points in base model : " + str(len(mergePointn[0]))
            print "number of points in model " + str(idx) + " : " + str(len(pointList[idx]))
            print "number of inliers : " + str(len(inlierMap))
            if len(sfmViewBeaconDataList)>0:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx], AList[idx], {x[0]: x[1] for x in inlierMap}, mergeSfmViewBeaconData, sfmViewBeaconDataList[idx])
            else:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx], AList[idx], {x[0]: x[1] for x in inlierMap})
        
        # refresh the merged point cloud for the next iteration's inlier search
        mergePointId, mergePoint = mergeSfM.getAll3DPointloc(mergeSfmData)
        mergePointn = np.asarray(mergePoint, dtype=np.float).T
    
    # go back to coordinate of the first model:
    # for affine A = [R | t], the inverse is [R^-1 | -R^-1 t]
    _invA = np.linalg.inv(AList[0][0:3,0:3])
    invA = np.c_[_invA, -np.dot(_invA,AList[0][:,3])]
    mergeSfM.transform_sfm_data(mergeSfmData, invA)
    
    mergeSfmData["root_path"] = os.path.join(output_dir,"Input","inputImg")
    
    resultSfMDataFile = os.path.join(output_dir,"Output","SfM","reconstruction","global","sfm_data.json")
    
    with open(os.path.join(resultSfMDataFile),"w") as jsonfile:
        json.dump(mergeSfmData, jsonfile)
    
    # export merged beacon data, one single-entry map per view
    # NOTE(review): mergeSfmViewBeaconData is iterated by key here, so it is
    # presumably dict-like — confirm against iBeaconUtils.readBeaconData.
    if mergeSfmViewBeaconData is not None:
        mergeSfmViewBeaconDataMapList = []
        for key in mergeSfmViewBeaconData:
            mergeSfmViewBeaconDataMap = {}
            mergeSfmViewBeaconDataMap[key] = mergeSfmViewBeaconData[key]
            mergeSfmViewBeaconDataMapList.append(mergeSfmViewBeaconDataMap)
        iBeaconUtils.exportBeaconData(len(mergeSfmData["views"]), sfmBeaconMap, mergeSfmViewBeaconDataMapList, 
                                      os.path.join(os.path.dirname(resultSfMDataFile), "beacon.txt"))
    
    # bundle-adjust the merged model in place
    # (refine rotation/translation/structure (+intrinsics) only: -c=rst,rsti)
    '''
    os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
    '''
    os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile + \
              " -c=" + "rst,rsti" + " -r=" + "1")
    
    # save the global transform of the base model in both NumPy and OpenCV formats
    Amat = AList[0]
    with open(os.path.join(output_dir,"Ref","Amat.txt"),"w") as AmatFile:
        np.savetxt(AmatFile,Amat)
    FileUtils.convertNumpyMatTxt2OpenCvMatYml(os.path.join(output_dir,"Ref","Amat.txt"), os.path.join(output_dir,"Ref","Amat.yml"), "A")
    
    # To create same directory structure before merging, create sfm_data.json without structure information in matches directory
    # NOTE(review): assumes <output_dir>/Output/matches already exists
    # (presumably created by copyOriginalFiles above) — confirm.
    with open(resultSfMDataFile) as fpr:
        sfmData = json.load(fpr)
        sfmData["extrinsics"] = []
        sfmData["control_points"] = []
        sfmData["structure"] = []
        with open(os.path.join(output_dir,"Output","matches","sfm_data.json"),"w") as fpw:
            json.dump(sfmData, fpw)
    
    # train the BOW (+PCA) model used for accelerated localization
    print "Execute : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml")
    os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml"))
    
    # colorize the merged point cloud for visual inspection
    os.system("openMVG_main_ComputeSfM_DataColor -i " + resultSfMDataFile + \
              " -o " + os.path.join(output_dir,"Output","SfM","reconstruction","global","colorized.ply"))
    def mergeOneModel(self, model1, model2, reconParam, reconIBeaconParam, reconBOWParam):
        """Attempt to merge model2 into model1 via localization.

        Localizes model2's reconstructed frames against model1, estimates a
        RANSAC transform from the localization matches (mergeSfM.mergeModel),
        validates the merge by checking how many localized frames agree with
        the transformed model, bundle-adjusts the merged model, colorizes it,
        and writes a new beacon file.

        Returns a tuple (success, sfmModelIBeacon):
        - (True, merged model) when the merge passed validation;
        - (False, empty placeholder model) when it was rejected.

        NOTE(review): relies on self.useBow, self.mSfMPath, self.mMatchesPath,
        self.mCsvPath, self.mInputPath, self.mInputImgPath and
        self.nMergedModel being set by __init__ (not all visible in this
        chunk) — confirm.
        """
        
        # output folder for this merge attempt, e.g. <mSfMPath>/global3
        sfmOutPath = os.path.join(self.mSfMPath,"global"+str(self.nMergedModel))
        
        # modified by T. IShihara 2016.06.14
        # fix file name too long issue
        # 
        # create a temporary folder for reconstructed image of model2
        #inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmp"+model2.name)        
        inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmpModel2")
        if os.path.isdir(inputImgTmpFolder):
            FileUtils.removedir(inputImgTmpFolder)
        
        # copy reconstructed image fom model2 to tmp folder
        # (symlinks via "cp -s", restricted to views whose id is in model2.reconFrame)
        sfm_data2 = FileUtils.loadjson(model2.sfm_dataLoc)
        if not os.path.isdir(inputImgTmpFolder):
            listReconFrameName = [sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["filename"] for x in range(0,len(sfm_data2["views"])) if sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["id_view"] in model2.reconFrame]
            FileUtils.makedir(inputImgTmpFolder)
            for reconFrameName in listReconFrameName:
                os.system("cp -s " + os.path.join(model2.imgFolLoc,reconFrameName) + " " + inputImgTmpFolder)
        
        
        # remove all old localization result
        FileUtils.removedir(model2.locFolLoc) 
        FileUtils.makedir(model2.locFolLoc)

        # localize the images from model2 on model1
        # (same external localizer; the BOW variant adds -kb/-a/-p arguments)
        if self.useBow:
            os.system(reconIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + inputImgTmpFolder + \
                      " " + os.path.dirname(model1.sfm_dataLoc) + \
                      " " + self.mMatchesPath + \
                      " " + model2.locFolLoc + \
                      " -f=" + str(reconParam.locFeatDistRatio) + \
                      " -r=" + str(reconParam.locRansacRound) + \
                      " -b=" + model1.beaconFileLoc + \
                      " -e=" + model2.csvFolLoc + \
                      " -k=" + str(reconIBeaconParam.locKNNnum) + \
                      " -c=" + str(reconIBeaconParam.coocThres) + \
                      " -i=" + str(reconParam.locSkipFrame) + \
                      " -v=" + str(reconIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(reconIBeaconParam.normApproach) + \
                      " -kb=" + str(reconBOWParam.locKNNnum) + \
                      " -a=" + os.path.join(self.mMatchesPath, "BOWfile.yml") + \
                      " -p=" + os.path.join(self.mMatchesPath, "PCAfile.yml"))                                  
        else:
            os.system(reconIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + inputImgTmpFolder + \
                      " " + os.path.dirname(model1.sfm_dataLoc) + \
                      " " + self.mMatchesPath + \
                      " " + model2.locFolLoc + \
                      " -f=" + str(reconParam.locFeatDistRatio) + \
                      " -r=" + str(reconParam.locRansacRound) + \
                      " -b=" + model1.beaconFileLoc + \
                      " -e=" + model2.csvFolLoc + \
                      " -k=" + str(reconIBeaconParam.locKNNnum) + \
                      " -c=" + str(reconIBeaconParam.coocThres) + \
                      " -i=" + str(reconParam.locSkipFrame) + \
                      " -v=" + str(reconIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(reconIBeaconParam.normApproach))
                  
        # remove temporary image folder
        # removedir(inputImgTmpFolder)
        
        # extract centers from all json file and write to a file
        # (each localization JSON holds a camera translation "t"; centers are
        # appended to center.txt as "x y z 255 0 0" point-cloud lines)
        fileLoc = open(os.path.join(model2.locFolLoc,"center.txt"),"w")
        countLocFrame = 0
        for filename in sorted(os.listdir(model2.locFolLoc)):
            if filename[-4:]!="json":
                continue
            
            countLocFrame = countLocFrame + 1
            with open(os.path.join(model2.locFolLoc,filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(str(loc[0]) + " "  + str(loc[1]) + " "  +str(loc[2]) + " 255 0 0\n" )   
        fileLoc.close() 
        
        # get inlier matches
        FileUtils.makedir(sfmOutPath)
        resultSfMDataFile = os.path.join(sfmOutPath,"sfm_data.json")
        # below also checks if the ratio between first and last svd of M[0:3,0:3] 
        # is good or not. If not then reject
        # TODO : revisit ransacRound parameter, use number of reconstruction frame to determine structure points transform seems small
        nMatchPointsTmp, nInlierTmp, M = mergeSfM.mergeModel(model1.sfm_dataLoc,
                            model2.sfm_dataLoc,
                            model2.locFolLoc,
                            resultSfMDataFile,
                            ransacThres=model1.ransacStructureThres,
                            mergePointThres=model1.mergeStructureThres,
                            ransacRoundMul=reconParam.ransacRoundMul,
                            inputImgDir=self.mInputImgPath,
                            minLimit=reconParam.min3DnInliers)
        
        # fraction of consistent 3D matches accepted as RANSAC inliers
        ratioInlierMatchPoints = 0.0
        if nMatchPointsTmp>0:
            ratioInlierMatchPoints = float(nInlierTmp)/nMatchPointsTmp
        
        # 3. perform test whether merge is good
        # countFileLoc defaults to 1 (not 0) to avoid division by zero below
        sfm_merge_generated = True
        countFileAgree = 0
        countFileLoc = 1
        if os.path.isfile(resultSfMDataFile):
            os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
            countFileLoc, countFileAgree = mergeSfM.modelMergeCheckLocal(resultSfMDataFile, model2.locFolLoc, model1.validMergeRansacThres)
        else:
            sfm_merge_generated = False
        
        # agreement ratios used by the acceptance test below
        ratioAgreeFrameReconFrame = 0.0
        if (len(model2.reconFrame)>0):
            ratioAgreeFrameReconFrame = float(countFileAgree)/len(model2.reconFrame)
        ratioAgreeFrameLocFrame = 0.0
        if (countFileLoc>0):
            ratioAgreeFrameLocFrame = float(countFileAgree)/countFileLoc
        
        # write log file
        with open(os.path.join(self.mSfMPath,"global"+str(self.nMergedModel),"log.txt"),"a") as filelog:
            filelog.write(("M1: " + model1.name + "\n" + \
                          "M2: " + model2.name + "\n" + \
                          "nMatchedPoints: " + str(nMatchPointsTmp) + "\n" + \
                          "nInliers: " + str(nInlierTmp) + "\n" + \
                          "ratioInlierWithMatchedPoints: " + str(ratioInlierMatchPoints) + "\n" + \
                          "countLocFrame: " + str(countLocFrame) + "\n" + \
                          "nReconFrame M2: " + str(len(model2.reconFrame)) + "\n" + \
                          "countFileAgree: " + str(countFileAgree) + "\n" + \
                          "countFileLoc: " + str(countFileLoc) + "\n" + \
                          "not sfm_merge_generated: " + str(not sfm_merge_generated) + "\n" + \
                          # obsolete condition by T. Ishihara 2015.11.10
                          #"nInlierTmp > "+str(reconParam.vldMergeRatioInliersFileagree)+"*countFileAgree: " + str(nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree) + "\n" + \
                          "countFileAgree > "+str(reconParam.vldMergeMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileAgree > "+str(reconParam.vldMergeSmallMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeSmallMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileLoc < countFileAgree*" +str(reconParam.vldMergeShortRatio)+ ": " + str(countFileLoc < countFileAgree*reconParam.vldMergeShortRatio) + "\n" + \
                          "ratioLocAgreeWithReconFrame: " + str(ratioAgreeFrameReconFrame) + "\n" + \
                          "ratioLocAgreeWithReconFrame > " + str(reconParam.vldMergeRatioAgrFReconF) + ": " + str(ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) + "\n" + \
                          "ratioLocAgreeWithLocFrame: " + str(ratioAgreeFrameLocFrame) + "\n" + \
                          "ratioLocAgreeWithLocFrame > " + str(reconParam.vldMergeRatioAgrFLocF) + ": " + str(ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF) + "\n" + \
                          str(M) + "\n\n"))
       
        # rename the localization folder to save localization result
        '''
        if os.path.isdir(model2.locFolLoc+model1.name):
            FileUtils.removedir(model2.locFolLoc+model1.name)
        os.rename(model2.locFolLoc,model2.locFolLoc+model1.name)
        '''
        
        # obsolete merge condition
        '''
        if not sfm_merge_generated or \
            not (nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree and \
            ((countFileAgree > reconParam.vldMergeMinCountFileAgree or (countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and countFileLoc < countFileAgree*reconParam.vldMergeShortRatio)) and \
            ((nInlierTmp > reconParam.vldMergeNInliers and float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconFNInliers) or float(countFileAgree)/countFileLoc > reconParam.vldMergeRatioAgrFLocF) and
            (float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconF))):
        '''
        # update merge condition by T. Ishihara 2015.11.10
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and \
                 countFileLoc < countFileAgree*reconParam.vldMergeShortRatio and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.04.02
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.06.09
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
                 nInlierTmp > reconParam.min3DnInliers and \
                 ratioInlierMatchPoints > reconParam.vldMergeRatioInliersMatchPoints):
        '''
        # update merge condition by T. Ishihara 2016.06.20
        # reject the merge unless enough localized frames agree and enough
        # 3D inliers support the estimated transform
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
                 nInlierTmp > reconParam.min3DnInliers):        
            print "Transformed locations do not agree with localization. Skip merge between " + model1.name + " and " + model2.name + "."
            
            '''
            if os.path.isfile(os.path.join(sfmOutPath,"sfm_data.json")):
                os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                          os.path.join(sfmOutPath,"sfm_data_("+model1.name + "," + model2.name+").json"))
            '''
                            
            # move to next video
            return False, sfmModelIBeacon("","","","","","","",validMergeRansacThres=0,validMergeRansacThresK=0,
                                          ransacStructureThres=0, ransacStructureThresK=0, 
                                          mergeStructureThres=0, mergeStructureThresK=0)
                
        # generate colorized before bundle adjustment for comparison
        os.system("openMVG_main_ComputeSfM_DataColor " +
            " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
            " -o " + os.path.join(sfmOutPath,"colorized_pre.ply"))        
        
        # TODO : try computing structure from know pose here
        # https://github.com/openMVG/openMVG/issues/246
        # http://openmvg.readthedocs.io/en/latest/software/SfM/ComputeStructureFromKnownPoses/
        
        # TODO : revisit the order of bundle adjustment
        # perform bundle adjustment
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rs,rst,rsti" + " -r=" + "1")
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rst,rsti" + " -r=" + "1")
        
        os.system("openMVG_main_ComputeSfM_DataColor " +
            " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
            " -o " + os.path.join(sfmOutPath,"colorized.ply"))
        
        # write new beacon file
        # NOTE(review): capital-I "IBeaconUtils" here vs "iBeaconUtils" used
        # elsewhere in this file — confirm which import name is correct.
        IBeaconUtils.exportBeaconDataForSfmImageFrames(self.mCsvPath, resultSfMDataFile, os.path.join(self.mInputPath,"listbeacon.txt"),
                                                       os.path.join(sfmOutPath,"beacon.txt"), reconIBeaconParam.normApproach)
        
        # merged model is named "A<model1>,<model2>Z" and inherits model1's thresholds
        return True, sfmModelIBeacon("A" + model1.name + "," + model2.name +"Z", self.mInputImgPath, self.mCsvPath, 
                                     os.path.join(sfmOutPath,"beacon.txt"), self.mMatchesPath, os.path.join(sfmOutPath,"loc"), 
                                     resultSfMDataFile, validMergeRansacThres=model1.validMergeRansacThres,
                                     ransacStructureThres=model1.ransacStructureThres, 
                                     mergeStructureThres=model1.mergeStructureThres)
def main():
    description = 'This script is for converting coordinate information for multiple models.' + \
                'By inputting connecting information for multiple models and local coordinate information for each model, ' + \
                'this script will convert local coordinate information to global coordinate for each model.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_edge_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which have information how each model is connected with other models.')
    parser.add_argument('input_coordinate_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which has file path for input/output coordinate information.')
    args = parser.parse_args()
    input_edge_csv = args.input_edge_csv
    input_coordinate_csv = args.input_coordinate_csv
        
    # read edge information and target coordinate files
    edgeIdList = []
    edgeConnect = {}
    edgeOffsetX = {}
    edgeOffsetY = {}
    edgeAngle = {}
    with open(input_edge_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            if (len(row)!=5):
                print "invalid csv for edge connection information"
                sys.exit()
            
            edgeId = int(row[0])
            edgeIdList.append(edgeId)
            edgeConnect[edgeId] = int(row[1])
            edgeOffsetX[edgeId] = float(row[2])
            edgeOffsetY[edgeId] = float(row[3])
            edgeAngle[edgeId] = float(row[4])
    
    coordFileList = []
    with open(input_coordinate_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            coordFileList.append([row[0], row[1], row[2]])
    
    # calculate transformation matrix for each edge
    originEdgeId = -1
    for edgeId in edgeIdList:    
        if (edgeConnect[edgeId]==-1):
            originEdgeId = edgeId
            break
    if (originEdgeId==-1):
        print "error : cannot find origin edge"
        sys.exit()
    print "origin edge : " + str(originEdgeId)
    
    # path for each edge from the origin
    edgePathList = {}
    for edgeId in edgeIdList:
        paths = []
            
        curPath = edgeId
        while True:
            if (curPath==-1):
                break;
            paths.append(curPath)
            curPath = edgeConnect[curPath]
        
        paths.reverse()
        if (paths[0]!=originEdgeId):
            print "error : first path is not origin edge"
            sys.exit()
        edgePathList[edgeId] = paths
    
    # transform for each edge
    edgeTransforms = {}
    for edgeId in edgeIdList:
        transform = np.array([[1,0,0],[0,1,0],[0,0,1]])
        for idx, curPath in enumerate(edgePathList[edgeId]):
            if (idx>0):
                R = rotationMatrix(edgeAngle[curPath])
                T = np.array([edgeOffsetX[curPath], edgeOffsetY[curPath]])
                RT = np.vstack((np.c_[R,T], np.array([0,0,1])))
                transform = np.dot(transform, RT)
        edgeTransforms[edgeId] = transform
    
    # convert coordinate
    for coordFile in coordFileList:
        edgeId = int(coordFile[0])
        print "edge ID : " + str(edgeId)
        print "path : " + str(edgePathList[edgeId])
        print "transform : "
        print (edgeTransforms[edgeId])
        print "input coordinate file : " + coordFile[1]
        print "output coordinate file : " + coordFile[2]
        
        imageCoordinateList = readImageCoordinateCsv(coordFile[1])
        
        if not os.path.isdir(os.path.dirname(coordFile[2])):
            FileUtils.makedir(coordFile[2])
        
        with open(coordFile[2],"w") as outfile:
            for imageCoordinate in imageCoordinateList:
                hcoor = np.array([imageCoordinate[1][0], imageCoordinate[1][1], 1.0])
                gcoor = np.dot(edgeTransforms[edgeId], hcoor)
                outfile.write(imageCoordinate[0] + " "  + str(gcoor[0]) + " "  + str(gcoor[1]) + " "  \
                              + str(imageCoordinate[1][2]) + "\n")
            outfile.close()
# Example #25 (score: 0)
def mergeModel(sfm_data_dirA, sfm_data_dirB, locFolderB, outfile, ransacThres, mergePointThres, ransacRoundMul=100, inputImgDir="", minLimit=4, svdRatio=1.75):
    
    print "Loading sfm_data"
    sfm_dataB = FileUtils.loadjson(sfm_data_dirB)
    
    # read matching pairs from localization result
    imgnameB, matchlistB = readMatch(locFolderB)
    
    # get viewID from image name for model B
    viewIDB = imgnameToViewID(imgnameB, sfm_dataB)

    # get mapping between viewID,featID to 3D point ID
    viewFeatMapB = getViewFeatTo3DMap(sfm_dataB)

    # find consistent match between 3D of model B to 3D of model A
    print "Calculating consistent 3D matches"
    match3D_BA = getConsistent3DMatch(viewIDB, matchlistB, viewFeatMapB)
    print "Found " + str(len(match3D_BA)) + " consistent matches"
    
    # not enough matches
    if len(match3D_BA) <= 4 or len(match3D_BA) <= minLimit:
        return len(match3D_BA), len(match3D_BA), np.asarray([])
 
    # move the load of larger model here to reduce time if merging is not possible
    sfm_dataA = FileUtils.loadjson(sfm_data_dirA)
 
    # get 3D point. Note that element 0 of each pair in match3D_BA
    # is 3D pt ID of model B and element 1 is that of model A
    print "Load 3D points"
    pointA = get3DPointloc(sfm_dataA, [x[1] for x in match3D_BA])
    pointB = get3DPointloc(sfm_dataB, [x[0] for x in match3D_BA])
    
    pointAn = np.asarray(pointA, dtype=np.float).T
    pointBn = np.asarray(pointB, dtype=np.float).T
        
    # find robust transformation
    print "Find transformation with RANSAC"
    ransacRound = len(match3D_BA)*ransacRoundMul
    print "Number of RANSAC round : " + str(ransacRound)
    M, inliers = ransacTransform(pointAn, pointBn, ransacThres, ransacRound, svdRatio)
    
    # cannot find RANSAC transformation
    if (M.size==0):
        return len(match3D_BA), len(match3D_BA), np.asarray([])
    print M
    
    # stop if not enough inliers
    sSvd = np.linalg.svd(M[0:3,0:3],compute_uv=0)
    # fixed by T.Ishihara to use minLimit 2016.06.06
    #if len(inliers) <= 4 or sSvd[0]/sSvd[-1] > svdRatio:
    if len(inliers) <= minLimit or sSvd[0]/sSvd[-1] > svdRatio:
        return len(match3D_BA), len(inliers), M
        
    # perform merge 
    # last argument is map from inliers 3D pt Id of model B to that of model A
    print "Merging sfm_data"
    # fixed by T. Ishihara, use different parameter to find ransac inlier and merge points inliers
    '''
    merge_sfm_data(sfm_dataA, sfm_dataB, M, {match3D_BA[x][0]: match3D_BA[x][1] for x in inliers})
    '''
    mergePointInliers = getInliersByAffineTransform(pointAn, pointBn, M, mergePointThres)
    merge_sfm_data(sfm_dataA, sfm_dataB, M, {match3D_BA[x][0]: match3D_BA[x][1] for x in mergePointInliers})
    
    # change input image folder
    if inputImgDir != "":
        sfm_dataA["root_path"] = inputImgDir
    
    # save json file
    print "Saving json file"
    FileUtils.savejson(sfm_dataA,outfile)
    
    # return number of inliers for transformation
    return len(match3D_BA), len(inliers), M
def main():
    """Estimate the SfM-model-to-world-coordinate transform and run localization tests.

    Workflow:
      1. If Ref/Amat.txt is missing: localize the reference images in
         <project_dir>/Ref/inputImg against the SfM model, pair each localized
         camera position with its known world coordinate from Ref/refcoor.txt,
         and estimate the affine transform Amat via RANSAC. Amat is saved both
         as numpy text (Amat.txt) and OpenCV yml (Amat.yml).
      2. Convert the SfM model and its camera/structure ply files to world
         coordinates using Amat.
      3. If --test-project-dir is given: localize every test folder's images,
         transform each result by Amat, write a merged loc_global.json and a
         ply of localized points per folder, and log localization ratios.

    Command-line arguments are described in the argparse setup below; BOW and
    iBeacon flags select which external localization binary/options are used.
    """
    description = 'This script is for calcularing the matrix for converting 3D model to world coordinate and testing localization.' + \
        'Before running this script, please prepare the text file which has image names and 3D coordinate where photos are taken in Ref folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG project is located.')
    parser.add_argument('matches_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG created matches files.')
    parser.add_argument('sfm_data_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG sfm_data.json is located.')
    parser.add_argument('-t', '--test-project-dir', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where localization test image files are located.')
    parser.add_argument('-o', '--output-json-filename', action='store', nargs='?', const=None, \
                        default='loc_global.json', type=str, choices=None, metavar=None, \
                        help='Output localization result json filename.')
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate localization if this flag is set (default: False)')
    parser.add_argument('--beacon', action='store_true', default=False, \
                        help='Use iBeacon to accelerate localization if this flag is set (default: False)')
    args = parser.parse_args()
    project_dir = args.project_dir
    matches_dir = args.matches_dir
    sfm_data_dir = args.sfm_data_dir
    test_project_dir = args.test_project_dir
    output_json_filename = args.output_json_filename
    USE_BOW = args.bow
    USE_BEACON = args.beacon

    BOW_FILE = os.path.join(matches_dir, "BOWfile.yml")
    PCA_FILE = os.path.join(matches_dir, "PCAfile.yml")
    SFM_BEACON_FILE = sfm_data_dir + "/beacon.txt"
    REF_FOLDER = project_dir + "/Ref"

    # sanity-check that the auxiliary model files exist for the chosen flags
    if USE_BOW and not os.path.isfile(BOW_FILE):
        print "Use BOW flag is set, but cannot find BOW model file"
        sys.exit()
    if USE_BEACON and not os.path.isfile(SFM_BEACON_FILE):
        print "Use iBeacon flag is set, but cannot find beacon signal file for SfM data"
        sys.exit()

    if not os.path.isfile(os.path.join(REF_FOLDER, "Amat.txt")):

        # 1. find transformation between reconstructed coordinate and world coordinate

        # 1.1 localize reference images (fresh run: clear any previous results)
        REF_FOLDER_LOC = os.path.join(REF_FOLDER, "loc")
        if os.path.isdir(REF_FOLDER_LOC):
            shutil.rmtree(REF_FOLDER_LOC)
        os.mkdir(REF_FOLDER_LOC)

        guideMatchOption = ""
        if reconstructParam.bGuidedMatchingLocalize:
            guideMatchOption = " -gm"
        # select the localization binary and options for the BOW/iBeacon combination
        if USE_BOW and not USE_BEACON:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -k=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE + \
                      guideMatchOption)
        elif not USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(REF_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach) + \
                      guideMatchOption)
        elif USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(REF_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach) + \
                      " -kb=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE + \
                      guideMatchOption)
        else:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      guideMatchOption)

        # extract centers from all json file and write to a file
        fileLoc = open(os.path.join(REF_FOLDER_LOC, "center.txt"), "w")
        countLocFrame = 0

        for filename in sorted(os.listdir(REF_FOLDER_LOC)):
            if filename[-4:] != "json":
                continue

            countLocFrame = countLocFrame + 1
            with open(os.path.join(REF_FOLDER_LOC, filename)) as locJson:
                locJsonDict = json.load(locJson)
                if "t" in locJsonDict:
                    loc = locJsonDict["t"]
                    fileLoc.write(
                        str(loc[0]) + " " + str(loc[1]) + " " + str(loc[2]) +
                        " 255 0 0\n")

        fileLoc.close()

        # read reference data (map: image name -> known world coordinate)
        mapNameLocRef = FileUtils.loadImageLocationListTxt(
            os.path.join(REF_FOLDER, "refcoor.txt"))

        # read localized json file and find its matching world coordinate
        worldCoor = []
        locCoor = []
        countLoc = 0
        for filename in os.listdir(REF_FOLDER_LOC):
            if filename[-4:] != "json":
                continue

            # read json localization file
            with open(os.path.join(REF_FOLDER_LOC, filename)) as jsonlocfile:
                jsonLoc = json.load(jsonlocfile)

                imgLocName = os.path.basename(jsonLoc["filename"])

                # if file exist in map, add to matrix
                if imgLocName in mapNameLocRef:
                    if "t" in jsonLoc:
                        locCoor.append(jsonLoc["t"])
                        worldCoor.append(mapNameLocRef[imgLocName])
                        countLoc = countLoc + 1

        print "From " + str(len(mapNameLocRef)) + " reference images, " + str(
            countLoc) + " images has been localized."

        # need at least 4 point correspondences to estimate the transform
        if countLoc < 4:
            print "Cannot fix to world coordinate because of less than 4 reference points"
            return

        # find tranformation (RANSAC over 3D point correspondences)
        Amat, inliers = mergeSfM.ransacTransform(
            np.array(worldCoor).T,
            np.array(locCoor).T,
            reconstructParam.ransacThresTransformWorldCoordinateRefImage,
            ransacRound=1000)

        if len(inliers) < 4:
            print "Cannot estimate transformation matrix to world coordinate"
            print Amat
            return

        # fixed: add missing space before "inliers" in the message
        print "Transformation matrix has " + str(len(inliers)) + " inliers"
        print Amat

        with open(os.path.join(REF_FOLDER, "Amat.txt"), "w") as AmatFile:
            np.savetxt(AmatFile, Amat)
        FileUtils.convertNumpyMatTxt2OpenCvMatYml(
            os.path.join(REF_FOLDER, "Amat.txt"),
            os.path.join(REF_FOLDER, "Amat.yml"), "A")
    else:
        # reuse previously estimated transformation
        with open(os.path.join(REF_FOLDER, "Amat.txt"), "r") as AmatFile:
            Amat = np.loadtxt(AmatFile)

    # convert ply file to world coordinate
    SfmDataUtils.saveGlobalSfM(
        os.path.join(sfm_data_dir, "sfm_data.json"),
        os.path.join(REF_FOLDER, "Amat.txt"),
        os.path.join(sfm_data_dir, "sfm_data_global.json"))
    os.system("openMVG_main_ComputeSfM_DataColor -i " +
              os.path.join(sfm_data_dir, "sfm_data_global.json") + " -o " +
              os.path.join(sfm_data_dir, "colorized_global.ply"))
    PlyUtis.saveCameraPly(
        os.path.join(sfm_data_dir, "sfm_data_global.json"),
        os.path.join(sfm_data_dir, "colorized_global_camera.ply"))
    PlyUtis.saveStructurePly(
        os.path.join(sfm_data_dir, "sfm_data_global.json"),
        os.path.join(sfm_data_dir, "colorized_global_structure.ply"))

    # start localize test
    if test_project_dir:
        countFrameTotal = 0
        countLocFrameTotal = 0
        log_txt_filename = os.path.join(test_project_dir, "log.txt")
        if os.path.exists(log_txt_filename):
            os.remove(log_txt_filename)

        for testFolder in sorted(os.listdir(test_project_dir)):
            TEST_DIR = os.path.join(test_project_dir, testFolder)

            # skip entries that are not test folders
            if not os.path.exists(os.path.join(TEST_DIR, "inputImg")):
                continue

            TEST_FOLDER_LOC = os.path.join(TEST_DIR, "loc")
            # center.txt acts as a completion marker: skip folders already localized
            if not os.path.isfile(os.path.join(TEST_FOLDER_LOC, "center.txt")):

                # localize test images
                if os.path.isdir(TEST_FOLDER_LOC):
                    shutil.rmtree(TEST_FOLDER_LOC)
                os.mkdir(TEST_FOLDER_LOC)

                if USE_BOW and not USE_BEACON:
                    os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -k=" + str(localizeBOWParam.locKNNnum) + \
                              " -a=" + BOW_FILE + \
                              " -p=" + PCA_FILE)
                elif not USE_BOW and USE_BEACON:
                    os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -b=" + SFM_BEACON_FILE + \
                              " -e=" + os.path.join(TEST_DIR,"csv") + \
                              " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                              " -c=" + str(localizeIBeaconParam.coocThres) + \
                              " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                              " -n=" + str(localizeIBeaconParam.normApproach))
                elif USE_BOW and USE_BEACON:
                    os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -b=" + SFM_BEACON_FILE + \
                              " -e=" + os.path.join(TEST_DIR,"csv") + \
                              " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                              " -c=" + str(localizeIBeaconParam.coocThres) + \
                              " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                              " -n=" + str(localizeIBeaconParam.normApproach) + \
                              " -kb=" + str(localizeBOWParam.locKNNnum) + \
                              " -a=" + BOW_FILE + \
                              " -p=" + PCA_FILE)
                else:
                    os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound))

                # extract centers from all json file and write to a file
                countLocFrame = 0
                fileLoc = open(os.path.join(TEST_FOLDER_LOC, "center.txt"),
                               "w")
                for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
                    if filename[-4:] != "json":
                        continue

                    countLocFrame = countLocFrame + 1
                    with open(os.path.join(TEST_FOLDER_LOC,
                                           filename)) as locJson:
                        # bug fix: the json must be parsed BEFORE the "t" key is
                        # checked; the original tested a stale locJsonDict left
                        # over from the reference-image loop
                        locJsonDict = json.load(locJson)
                        if "t" in locJsonDict:
                            loc = locJsonDict["t"]
                            fileLoc.write(
                                str(loc[0]) + " " + str(loc[1]) + " " +
                                str(loc[2]) + " 255 0 0\n")
                fileLoc.close()

                # count input images
                imageTypes = ("*.jpg", "*.JPG", "*.jpeg", "*.JPEG", "*.png",
                              "*.PNG")
                imageFiles = []
                for imageType in imageTypes:
                    imageFiles.extend(
                        glob.glob(os.path.join(TEST_DIR, "inputImg",
                                               imageType)))
                countFrame = len(imageFiles)

                # write log file
                with open(log_txt_filename, "a") as logfile:
                    logfile.write("result for : " + TEST_DIR + "\n")
                    logfile.write("number of localized frame : " +
                                  str(countLocFrame) + "/" + str(countFrame) +
                                  "\n")
                    # guard: an inputImg folder may contain no images
                    if countFrame > 0:
                        logfile.write("ratio of localized frame : " +
                                      str(float(countLocFrame) / countFrame) +
                                      "\n")
                countFrameTotal += countFrame
                countLocFrameTotal += countLocFrame

            # convert all localization results to world coordinate and merge to one json file
            locGlobalJsonObj = {}
            locGlobalJsonObj["locGlobal"] = []
            locGlobalPoints = []
            for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
                if filename[-4:] != "json":
                    continue
                with open(os.path.join(TEST_FOLDER_LOC, filename)) as jsonfile:
                    jsonLoc = json.load(jsonfile)

                    if "t" in jsonLoc:
                        # keep model-coordinate pose, then overwrite t/R with
                        # world-coordinate pose via the affine transform Amat
                        jsonLoc["t_relative"] = jsonLoc["t"]
                        jsonLoc["R_relative"] = jsonLoc["R"]
                        jsonLoc["t"] = np.dot(
                            Amat, np.concatenate([jsonLoc["t"],
                                                  [1]])).tolist()
                        jsonLoc["R"] = np.dot(jsonLoc["R"],
                                              Amat[:, 0:3].T).tolist()

                        locGlobalPoints.append(jsonLoc["t"])

                    locGlobalJsonObj["locGlobal"].append(jsonLoc)
            with open(os.path.join(TEST_FOLDER_LOC, output_json_filename),
                      "w") as jsonfile:
                json.dump(locGlobalJsonObj, jsonfile)

            # save localization results to ply file
            PlyUtis.addPointToPly(
                os.path.join(sfm_data_dir, "colorized_global_structure.ply"),
                locGlobalPoints,
                os.path.join(TEST_FOLDER_LOC, "colorized_global_localize.ply"))

        # write log file
        with open(log_txt_filename, "a") as logfile:
            logfile.write("total result" + "\n")
            logfile.write("number of localized frame : " +
                          str(countLocFrameTotal) + "/" +
                          str(countFrameTotal) + "\n")
            # guard: totals stay 0 when every folder already had a cached
            # center.txt (re-run); the original raised ZeroDivisionError here
            if countFrameTotal > 0:
                logfile.write("ratio of localized frame : " +
                              str(float(countLocFrameTotal) / countFrameTotal) +
                              "\n")
Example #27
0
    def mergeModel(self,
                   image_descFile,
                   inputPath,
                   outputPath,
                   reconParam=ReconstructParam,
                   reconBOWParam=ReconstructBOWParam):
        """Iteratively merge the SfM models in self.sfmModel into fewer models.

        The loop repeatedly: builds a co-occurrence graph over the current
        models (self.calcGraph), picks the smallest model that is connected to
        at least one other ("base video"), and tries to merge its neighbors
        into it from smallest to largest (self.mergeOneModel). Successful
        merges shrink self.sfmModel and the graph; failed pairs are recorded
        in self.badMatches so they are not retried. The loop exits when no
        connected components with more than one model remain.

        Args:
            image_descFile: path to OpenMVG's image_describer.txt, copied into
                the merged matches folder.
            inputPath: root folder containing per-model inputImg/csv folders.
            outputPath: root folder containing per-model matches folders
                (.desc/.feat/.bow files).
            reconParam: reconstruction parameter class (default ReconstructParam).
            reconBOWParam: BOW parameter class (default ReconstructBOWParam).
        """
        print "Begin merging models"

        # ensure merged-workspace folders exist
        FileUtils.makedir(self.mInputImgPath)
        FileUtils.makedir(self.mCsvPath)
        FileUtils.makedir(self.mMatchesPath)
        FileUtils.makedir(self.mSfMPath)

        # create symbolic links to all images, csv, and descriptor/feature files
        # ("cp -s" makes symlinks; --remove-destination overwrites stale links)
        os.system("cp --remove-destination -s " +
                  os.path.join(inputPath, "*", "inputImg", "*") + " " +
                  self.mInputImgPath)
        os.system("cp --remove-destination -s " +
                  os.path.join(inputPath, "*", "csv", "*") + " " +
                  self.mCsvPath)
        os.system("cp --remove-destination -s " +
                  os.path.join(outputPath, "*", "matches", "*.desc") + " " +
                  self.mMatchesPath)
        os.system("cp --remove-destination -s " +
                  os.path.join(outputPath, "*", "matches", "*.feat") + " " +
                  self.mMatchesPath)
        os.system("cp --remove-destination -s " +
                  os.path.join(outputPath, "*", "matches", "*.bow") + " " +
                  self.mMatchesPath)

        # copy image_describer.txt (real copy, not a symlink)
        os.system("cp --remove-destination " + image_descFile + " " +
                  self.mMatchesPath)

        listLead = range(
            0, len(self.sfmModel)
        )  # model indexes allowed to initiate a merge (recomputed each loop iteration below)
        listBye = [
        ]  # model NAMES that failed to merge and are excluded from initiating a merge
        baseVideo = -1
        mergeCandidatesRemainsForBaseVideo = True
        calcGraphEdges = False

        while True:
            # the previous base video exhausted its candidates without a
            # successful merge: retire it from initiating merges
            if not mergeCandidatesRemainsForBaseVideo:
                listBye.append(self.sfmModel[baseVideo].name)

            # rebuild listLead as the indexes of models whose names are not retired
            listName = [(x, self.sfmModel[x].name)
                        for x in range(0, len(self.sfmModel))]
            listLead = [x[0] for x in listName if x[1] not in listBye]

            # if there was a merge, recalculate the cooccurence graph
            if mergeCandidatesRemainsForBaseVideo:
                # calculate cooccurence graph (only once until a merge changes it)
                if not calcGraphEdges:
                    graphEdges = self.calcGraph()
                    calcGraphEdges = True

                print "graph edges : " + str(graphEdges)
                print "SfM model names : " + str(
                    [x.name for x in self.sfmModel])
                # boolean adjacency: any positive edge weight means "connected"
                connectionGraph = (graphEdges > 0.0)

                # calculate connected component on graph
                ccLabel = scipy.sparse.csgraph.connected_components(
                    connectionGraph, directed=False)[1]

            # if no more mergable components (every model is its own component)
            if len(np.unique(ccLabel)) == len(ccLabel):
                print "No more mergable components. Exiting."
                return

            # sort the length of reconstructed frames in each video
            # from small to large to find the base Video
            reconFrameLenList = [
                len(self.sfmModel[i].reconFrame)
                for i in range(0, len(self.sfmModel))
            ]
            reconFrameLenIdx = [
                x[0] for x in sorted(enumerate(reconFrameLenList),
                                     key=lambda y: y[1])
            ]

            # find first (smallest) base video that has a connected component
            # and is still allowed to initiate a merge
            baseVideo = ""
            for video in reconFrameLenIdx:
                if np.sum(ccLabel == ccLabel[video]) > 1 and video in listLead:
                    baseVideo = video
                    break

            # this should never be called since program should exit
            # if there is no connected components in graph
            if baseVideo == "":
                print "Cannot find connected component to merge. Exiting."
                return

            # get videos that connect to this baseVideo
            # and sort them from smallest to largest as merge order
            neighborVec = np.where(connectionGraph[baseVideo, :])[0]
            neighborVec = neighborVec[
                neighborVec != baseVideo]  # prevent selecting itself to merge
            mergeCandidate = neighborVec.tolist()
            nReconFrameMergeCand = [
                len(self.sfmModel[x].reconFrame) for x in mergeCandidate
            ]
            orderMergeCand = [
                x[0] for x in sorted(enumerate(nReconFrameMergeCand),
                                     key=lambda y: y[1])
            ]
            mergeCandidateModel = [
                self.sfmModel[mergeCandidate[i]] for i in orderMergeCand
            ]

            mergedModel = self.sfmModel[baseVideo]

            print "Based model: " + mergedModel.name
            print "To merge with: " + str(
                [x.name for x in mergeCandidateModel])
            mergeCandidatesRemainsForBaseVideo = False
            for video in mergeCandidateModel:

                # check if failed localization has been performed on this pair before
                # if so, skip this localization
                if self.isBadMatch(video, mergedModel):
                    continue

                # swap order so small model is merged to larger model
                swap = False
                if len(mergedModel.reconFrame) < len(video.reconFrame):
                    tmp = mergedModel
                    mergedModel = video
                    video = tmp
                    swap = True

                # attempt merge
                mergeResult, mergedModelTmp = self.mergeOneModel(
                    mergedModel, video, reconParam, reconBOWParam)

                if mergeResult:
                    # merge succeeded: absorb the merged model and drop the source
                    mergedModel.update(mergedModelTmp)
                    videoIdx = self.sfmModel.index(video)
                    del self.sfmModel[videoIdx]

                    # update graph: remove the absorbed model's row and column
                    graphEdges = np.delete(graphEdges, videoIdx, 0)
                    graphEdges = np.delete(graphEdges, videoIdx, 1)

                    self.nMergedModel = self.nMergedModel + 1
                    # persist merge state both in the per-merge folder and at the root
                    self.save(
                        os.path.join(self.mSfMPath,
                                     "global" + str(self.nMergedModel - 1),
                                     "mergeGraph.txt"))
                    self.save(os.path.join(self.mSfMPath, "mergeGraph.txt"))
                    mergeCandidatesRemainsForBaseVideo = True

                    # reset listBye to allow small model to merge to new large model
                    listBye = []

                    # write result log file
                    with open(os.path.join(self.mSfMPath, "logRecon.txt"),
                              "a") as outLogFile:
                        outLogFile.write(
                            str(self.nMergedModel - 1) + " " +
                            mergedModel.name + "\n")

                    # start again (graph changed; restart candidate selection)
                    break
                else:
                    # merge failed: remember the pair so it is never retried
                    self.badMatches.append([video.name, mergedModel.name])

                    # save
                    self.save(os.path.join(self.mSfMPath, "mergeGraph.txt"))

                    if swap:
                        # swap back if not merged
                        mergedModel = video
Example #28
0
def main():
    """Reconstruct a 3D model from each video under Input/ using OpenMVG,
    then merge all per-video models into one global 3D model.

    Command-line interface:
        project_path           -- project directory containing an Input/
                                  folder with one sub-folder per video.
        -k/--path-camera-file  -- optional 3x3 camera matrix in NumPy text
                                  format; its focal length overrides the
                                  default from ReconstructParam.
        --bow                  -- use Bag-of-Words features to accelerate
                                  the model-merging stage.
    """
    # set default parameter
    reconstructParam = ReconstructParam.ReconstructParam
    reconstructBOWParam = ReconstructBOWParam.ReconstructBOWParam

    # parse parameters
    description = 'This script is for reconstruct 3D models from multiple videos and merge to one 3D model. ' + \
                'BOW is used for accelerating 3D model merge. ' + \
                'Please prepare multiple videos in Input folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_path', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where your 3D model project is stored.')
    parser.add_argument('-k', '--path-camera-file', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where camera matrix is stored in Numpy text format. (default: focal length ' + \
                            str(reconstructParam.focalLength) + ' will be used)')
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate 3D model merge if this flag is set (default: False)')
    args = parser.parse_args()
    PROJECT_PATH = args.project_path
    USE_BOW = args.bow
    PATH_CAMERA_FILE = args.path_camera_file

    # validate the optional camera matrix and take its focal length K[0][0]
    if PATH_CAMERA_FILE:
        if os.path.exists(PATH_CAMERA_FILE):
            with open(PATH_CAMERA_FILE, "r") as camMatFile:
                K = np.loadtxt(camMatFile)
            if K.shape[0] != 3 or K.shape[1] != 3:
                print "Error : invalid camera matrix size = " + str(K)
                sys.exit()
            print "Focal length " + str(K[0][0]) + " is set for reconstruction"
            reconstructParam.focalLength = K[0][0]
        else:
            print "Error : invalid camera matrix file = " + PATH_CAMERA_FILE
            sys.exit()

    # get paths
    inputPath = os.path.join(PROJECT_PATH, "Input")
    outputPath = os.path.join(PROJECT_PATH, "Output")

    FileUtils.makedir(outputPath)

    # reconstruct all videos
    # NOTE: listVideo is appended to inside this loop (step 6 below); Python
    # iterates newly appended items, so split-off sub-videos are also
    # reconstructed in this same pass.
    listVideo = sorted(os.listdir(inputPath))
    for video in listVideo:
        if not os.path.isdir(os.path.join(inputPath, video)):
            continue

        print "Begin reconstructing video : " + video

        sfm_mainDir = os.path.join(outputPath, video)
        sfm_inputDir = os.path.join(inputPath, video)
        sfm_inputImgDir = os.path.join(sfm_inputDir, "inputImg")
        sfm_matchesDir = os.path.join(sfm_mainDir, "matches")
        sfm_sfmDir = os.path.join(sfm_mainDir, "SfM")
        sfm_reconstructDir = os.path.join(sfm_sfmDir, "reconstruction")
        sfm_globalDir = os.path.join(sfm_reconstructDir, "global")

        FileUtils.makedir(sfm_mainDir)
        FileUtils.makedir(sfm_inputImgDir)
        FileUtils.makedir(sfm_matchesDir)
        FileUtils.makedir(sfm_sfmDir)
        FileUtils.makedir(sfm_reconstructDir)
        FileUtils.makedir(sfm_globalDir)

        # skip videos that already have a global reconstruction
        if not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")):
            # list images
            os.system("openMVG_main_SfMInit_ImageListing -i " +
                      sfm_inputImgDir + " -o " + sfm_matchesDir + " -d " +
                      reconstructParam.CAMERA_DATABASE_PATH)

            # 1.1 Check intrinsic
            # ( if camera parameter not specified then replace with fixed camera.
            # and set appropriate width and height)
            with open(os.path.join(sfm_matchesDir,
                                   "sfm_data.json")) as sfm_data_file:
                sfm_data = json.load(sfm_data_file)
                # NOTE(review): assumes the image listing produced at least
                # one view — TODO confirm inputImg is never empty here
                hImg = sfm_data["views"][0]['value']['ptr_wrapper']['data'][
                    "height"]
                wImg = sfm_data["views"][0]['value']['ptr_wrapper']['data'][
                    "width"]
                if len(sfm_data["intrinsics"]) == 0:
                    # no intrinsic was recognized: point every view at a
                    # single shared pinhole intrinsic built below
                    for view in sfm_data["views"]:
                        view["value"]["ptr_wrapper"]["data"][
                            "id_intrinsic"] = 0

                    sfm_data["intrinsics"].append({})
                    sfm_data["intrinsics"][0]["key"] = 0
                    sfm_data["intrinsics"][0]["values"] = {}
                    # sfm_data["intrinsics"][0]["values"]["polymorphic_name"] = "pinhole_radial_k3"
                    sfm_data["intrinsics"][0]["values"][
                        "polymorphic_name"] = "pinhole"
                    # NOTE(review): magic polymorphic/ptr_wrapper ids —
                    # presumably tied to OpenMVG's cereal serialization
                    # format; verify against the OpenMVG version in use
                    sfm_data["intrinsics"][0]["values"][
                        "polymorphic_id"] = 2147999999
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"][
                        "id"] = 2147483660
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"][
                        "data"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "width"] = wImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "height"] = hImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "focal_length"] = reconstructParam.focalLength
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "disto_k3"] = [0, 0, 0]
                    # integer division under Python 2: principal point at
                    # the (truncated) image center
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "principal_point"] = [wImg / 2, hImg / 2]

            with open(os.path.join(sfm_matchesDir, "sfm_data.json"),
                      "w") as sfm_data_file:
                json.dump(sfm_data, sfm_data_file)

            # 2 - Features computation and matching
            # ( Compute a list of features & descriptors for each image)
            os.system(reconstructParam.EXTRACT_FEATURE_MATCH_PROJECT_PATH + \
                      " " + sfm_matchesDir + \
                      " -mf=" + str(reconstructParam.maxTrackletMatchDistance) + \
                      " -mm=" + str(reconstructParam.minMatchToRetain) + \
                      " -f=" + str(reconstructParam.extFeatDistRatio) + \
                      " -r=" + str(reconstructParam.extFeatRansacRound))

            # OpenMVG assumes matches.e.txt for global reconstruction, matches.f.txt for incremental reconstruction
            os.system("cp " + os.path.join(sfm_matchesDir, "matches.f.txt") +
                      " " + os.path.join(sfm_matchesDir, "matches.e.txt"))

            # 3 - Global reconstruction
            # GlobalSfM can fail nondeterministically; retry up to
            # rerunRecon times until sfm_data.json appears
            countRecon = 1
            while not os.path.isfile(
                    os.path.join(sfm_globalDir, "sfm_data.json")
            ) and countRecon < reconstructParam.rerunRecon:
                os.system("openMVG_main_GlobalSfM -i " +
                          os.path.join(sfm_matchesDir, "sfm_data.json") +
                          " -m " + sfm_matchesDir + " -o " + sfm_globalDir)
                countRecon = countRecon + 1
                time.sleep(1)

            # give up on this video if reconstruction never succeeded
            if not os.path.isfile(os.path.join(sfm_globalDir,
                                               "sfm_data.json")):
                continue

            # 4 - Color the pointcloud
            os.system("openMVG_main_ComputeSfM_DataColor -i " +
                      os.path.join(sfm_globalDir, "sfm_data.json") + " -o " +
                      os.path.join(sfm_globalDir, "colorized.ply"))

            # 4.5 remove part of reconstruction where it is incorrect
            # Specifically,sometimes when their matching is not adequate,
            # the reconstructed model will be divided into two or more models
            # with different scale and a "jump" between pose translation.
            # This function detects such jump and retain the the largest
            # beginning or ending part of reconstruction, while the rest
            # should be reconstructed separately by cleanSfM.
            countCut = 0
            # keep cutting until no more cut
            while cleanSfM.cutSfMDataJump(
                    os.path.join(sfm_globalDir, "sfm_data.json"),
                    bufferFrame=reconstructParam.bufferFrame):
                countCut = countCut + 1
                # keep each before-cut backup under a numbered name
                os.rename(
                    os.path.join(sfm_globalDir, "sfm_data_BC.json"),
                    os.path.join(sfm_globalDir,
                                 "sfm_data_BC" + str(countCut) + ".json"))
                # re-run bundle adjustment on the trimmed model
                os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " -c=" + "rs,rst,rsti")
            os.system("openMVG_main_ComputeSfM_DataColor -i " +
                      os.path.join(sfm_globalDir, "sfm_data.json") + " -o " +
                      os.path.join(sfm_globalDir, "colorized_AC.ply"))

            # 5 - Clean sfm_data by removing viewID of frames that are not used
            # in reconstruction and put them in another folder and reconstruct them again
            # note that sfm_data.json in matches folder is renamed and kept as reference
            unusedImg = cleanSfM.cleanSfM(
                os.path.join(sfm_globalDir, "sfm_data.json"), [
                    os.path.join(sfm_matchesDir, "matches.putative.txt"),
                    os.path.join(sfm_matchesDir, "matches.e.txt"),
                    os.path.join(sfm_matchesDir, "matches.f.txt")
                ])

            # 6. move unused images, csv files into a new folder unless they have less than x images
            for i in range(0, len(unusedImg)):
                listUnused = unusedImg[i]
                if len(listUnused) < reconstructParam.minUnusedImgLength:
                    continue

                # set name for new video
                # index 0 = frames cut from the front, 1 = frames from the back
                if i == 0:
                    newVidName = video + "_front"
                elif i == 1:
                    newVidName = video + "_back"
                else:
                    # this should not be called
                    continue

                # set path
                pathNewVid = os.path.join(inputPath, newVidName)

                # skip if there is already this folder
                if os.path.isdir(pathNewVid):
                    continue

                print "Extract unused part of " + video + " into " + newVidName

                FileUtils.makedir(pathNewVid)

                csvNewVid = os.path.join(pathNewVid, "csv")
                imgNewVid = os.path.join(pathNewVid, "inputImg")
                FileUtils.makedir(csvNewVid)
                FileUtils.makedir(imgNewVid)

                # copy image in list and csv file
                # "cp -s" creates symlinks instead of copying file contents
                os.system("cp -s " +
                          os.path.join(sfm_inputDir, "csv", "*.csv") + " " +
                          csvNewVid)
                for unusedFilename in listUnused:
                    os.system("cp -s " +
                              os.path.join(sfm_inputImgDir, unusedFilename) +
                              " " + imgNewVid)

                # append the folder into reconstruction queue
                listVideo.append(newVidName)

    # train bag of words model, and extract bag of words feature for all images
    if USE_BOW and not os.path.isfile(
            os.path.join(outputPath, "merge_result", "Output", "matches",
                         "BOWfile.yml")):
        outputBowPath = os.path.join(outputPath, "merge_result", "Output",
                                     "matches")
        if not os.path.isdir(outputBowPath):
            FileUtils.makedir(outputBowPath)
        print "Execute Training BOW : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml")
        os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml"))

    # load graph structure from "mergeGraph.txt" if it exists
    # create new graph structure if it does not exist
    if os.path.isfile(
            os.path.join(outputPath, "merge_result", "Output", "SfM",
                         "reconstruction", "mergeGraph.txt")):
        if USE_BOW:
            sfmGraph = sfmMergeGraphBOW.sfmGraphBOW.load(
                os.path.join(outputPath, "merge_result", "Output", "SfM",
                             "reconstruction", "mergeGraph.txt"))
        else:
            sfmGraph = sfmMergeGraph.sfmGraph.load(
                os.path.join(outputPath, "merge_result", "Output", "SfM",
                             "reconstruction", "mergeGraph.txt"))
        # workspace path is machine-specific, so restore it after load
        sfmGraph.workspacePath = reconstructParam.WORKSPACE_DIR

        #### start of manually adding new model code ####
        # In current code, you cannot add new 3D model once you start merging.
        # Enable following commented code to add new 3D model after you already started merging.
        '''
        newModelToAdd = []
        for newModelName in newModelToAdd:
            addModel(newModelName,os.path.join(inputPath,newModelName),os.path.join(outputPath,newModelName))
        sfmGraph.clearBadMatches()
        '''
        ### end of manually adding new model code ###
    else:
        if USE_BOW:
            sfmGraph = sfmMergeGraphBOW.sfmGraphBOW(
                inputPath, outputPath,
                os.path.join(outputPath, "merge_result", "Input"),
                os.path.join(outputPath, "merge_result", "Output", "SfM",
                             "reconstruction"),
                os.path.join(outputPath, "merge_result", "Output", "matches"),
                os.path.join(outputPath, "merge_result", "Input", "csv"),
                os.path.join(outputPath, "merge_result", "Input",
                             "inputImg"), reconstructParam.WORKSPACE_DIR,
                reconstructParam.minReconFrameToAdd)
        else:
            sfmGraph = sfmMergeGraph.sfmGraph(
                inputPath, outputPath,
                os.path.join(outputPath, "merge_result", "Input"),
                os.path.join(outputPath, "merge_result", "Output", "SfM",
                             "reconstruction"),
                os.path.join(outputPath, "merge_result", "Output", "matches"),
                os.path.join(outputPath, "merge_result", "Input", "csv"),
                os.path.join(outputPath, "merge_result", "Input",
                             "inputImg"), reconstructParam.WORKSPACE_DIR,
                reconstructParam.minReconFrameToAdd)

    # merge all per-video models; the image_describer of the first video is
    # used as the shared feature-extraction configuration
    if USE_BOW:
        sfmGraph.mergeModel(os.path.join(outputPath, listVideo[0], "matches",
                                         "image_describer.txt"),
                            inputPath,
                            outputPath,
                            reconParam=reconstructParam,
                            reconBOWParam=reconstructBOWParam)
    else:
        sfmGraph.mergeModel(os.path.join(outputPath, listVideo[0], "matches",
                                         "image_describer.txt"),
                            inputPath,
                            outputPath,
                            reconParam=reconstructParam)

    # select largest 3D model and save it
    SfMDataUtils.saveFinalSfM(PROJECT_PATH)
Example #29
0
def main():
    description = 'This script is for converting coordinate information for multiple models.' + \
                'By inputting connecting information for multiple models and local coordinate information for each model, ' + \
                'this script will convert local coordinate information to global coordinate for each model.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_edge_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which have information how each model is connected with other models.')
    parser.add_argument('input_coordinate_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which has file path for input/output coordinate information.')
    args = parser.parse_args()
    input_edge_csv = args.input_edge_csv
    input_coordinate_csv = args.input_coordinate_csv

    # read edge information and target coordinate files
    edgeIdList = []
    edgeConnect = {}
    edgeOffsetX = {}
    edgeOffsetY = {}
    edgeAngle = {}
    with open(input_edge_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            if (len(row) != 5):
                print "invalid csv for edge connection information"
                sys.exit()

            edgeId = int(row[0])
            edgeIdList.append(edgeId)
            edgeConnect[edgeId] = int(row[1])
            edgeOffsetX[edgeId] = float(row[2])
            edgeOffsetY[edgeId] = float(row[3])
            edgeAngle[edgeId] = float(row[4])

    coordFileList = []
    with open(input_coordinate_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            coordFileList.append([row[0], row[1], row[2]])

    # calculate transformation matrix for each edge
    originEdgeId = -1
    for edgeId in edgeIdList:
        if (edgeConnect[edgeId] == -1):
            originEdgeId = edgeId
            break
    if (originEdgeId == -1):
        print "error : cannot find origin edge"
        sys.exit()
    print "origin edge : " + str(originEdgeId)

    # path for each edge from the origin
    edgePathList = {}
    for edgeId in edgeIdList:
        paths = []

        curPath = edgeId
        while True:
            if (curPath == -1):
                break
            paths.append(curPath)
            curPath = edgeConnect[curPath]

        paths.reverse()
        if (paths[0] != originEdgeId):
            print "error : first path is not origin edge"
            sys.exit()
        edgePathList[edgeId] = paths

    # transform for each edge
    edgeTransforms = {}
    for edgeId in edgeIdList:
        transform = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        for idx, curPath in enumerate(edgePathList[edgeId]):
            if (idx > 0):
                R = rotationMatrix(edgeAngle[curPath])
                T = np.array([edgeOffsetX[curPath], edgeOffsetY[curPath]])
                RT = np.vstack((np.c_[R, T], np.array([0, 0, 1])))
                transform = np.dot(transform, RT)
        edgeTransforms[edgeId] = transform

    # convert coordinate
    for coordFile in coordFileList:
        edgeId = int(coordFile[0])
        print "edge ID : " + str(edgeId)
        print "path : " + str(edgePathList[edgeId])
        print "transform : "
        print(edgeTransforms[edgeId])
        print "input coordinate file : " + coordFile[1]
        print "output coordinate file : " + coordFile[2]

        imageCoordinateList = readImageCoordinateCsv(coordFile[1])

        if not os.path.isdir(os.path.dirname(coordFile[2])):
            FileUtils.makedir(coordFile[2])

        with open(coordFile[2], "w") as outfile:
            for imageCoordinate in imageCoordinateList:
                hcoor = np.array(
                    [imageCoordinate[1][0], imageCoordinate[1][1], 1.0])
                gcoor = np.dot(edgeTransforms[edgeId], hcoor)
                outfile.write(imageCoordinate[0] + " "  + str(gcoor[0]) + " "  + str(gcoor[1]) + " "  \
                              + str(imageCoordinate[1][2]) + "\n")
            outfile.close()
Example #30
0
def cleanSfM(sfm_data_path,matchesFile):
    
    sfm_data = FileUtils.loadjson(sfm_data_path)
    if (len(sfm_data["views"])==0):
        print "No views are used in reconstruction of " + sfm_data_path
        return [[],[]]
    if (len(sfm_data["extrinsics"])==0):
        print "No extrinsics are used in reconstruction of " + sfm_data_path
        return [[],[]]
    
    # get map from ID to index
    viewMap = {}
    for i in range(0,len(sfm_data["views"])):
        viewMap[sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"]] = i
                
    extMap = {}
    for i in range(0,len(sfm_data["extrinsics"])):
        extMap[sfm_data["extrinsics"][i]["key"]] = i
                
    strMap = {}
    for i in range(0,len(sfm_data["structure"])):
        strMap[sfm_data["structure"][i]["key"]] = i
    
    # find viewIDs of first and last frame used in reconstruction
    firstViewID = len(sfm_data["views"])
    lastViewID = 0
    firstExtID = min(extMap.keys())
    
    for i in range(0,len(sfm_data["views"])):
        viewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"]
        extID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"]
        
        if extID in extMap:
            if firstViewID > viewID:
                firstViewID = viewID
            if lastViewID < viewID:
                lastViewID = viewID
                
    if firstViewID >= lastViewID:
        print "No views are used in reconstruction of " + sfm_data_path
        return [[],[]]
    
    # get list of unused view back to front
    # and change the view Index
    unusedImgName = [[],[]]
    for i in range(len(sfm_data["views"])-1,lastViewID,-1):
        unusedImgName[1].append(sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])
        sfm_data["views"].pop(i)
    
    for i in range(lastViewID,firstViewID-1,-1):
        newViewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"]-firstViewID
        sfm_data["views"][i]["key"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_view"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"] = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"] - firstExtID
            
    for i in range(firstViewID-1,-1,-1):
        unusedImgName[0].append(sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])        
        sfm_data["views"].pop(i)
        
    # change extrinsics ID
    for i in range(0,len(sfm_data["extrinsics"])):
        sfm_data["extrinsics"][i]["key"] = sfm_data["extrinsics"][i]["key"]-firstExtID
        
    # change index of refered view in structure
    for i in range(0,len(sfm_data["structure"])):
        for j in range(0,len(sfm_data["structure"][i]["value"]["observations"])):
            sfm_data["structure"][i]["value"]["observations"][j]["key"] = \
                sfm_data["structure"][i]["value"]["observations"][j]["key"]-firstViewID
    
    # save jsonfile back
    FileUtils.savejson(sfm_data,sfm_data_path)
    
    # update matches file
    for matchfile in matchesFile:
        
        matchFileName = os.path.basename(matchfile)
        matchFileNameTmp = matchFileName.join(random.choice(string.lowercase) for i in range(10)) #random name
        matchDir = os.path.dirname(matchfile)
        
        fout = open(os.path.join(matchDir,matchFileNameTmp),"w")
        
        with open(matchfile,"r") as mfile:
            mode = 0
            write = False
            countLine = 0
            
            for line in mfile:
                
                line = line.strip()
                
                if mode == 0:
                    
                    line = line.split(" ")
                    
                    view1 = int(line[0])
                    view2 = int(line[1])
                    
                    if view1 < firstViewID or view1 > lastViewID or \
                        view2 < firstViewID or view2 > lastViewID:
                        write = False
                    else:
                        write = True
                    
                    if write:
                        # update viewID and write out
                        fout.write(str(int(line[0])-firstViewID))
                        fout.write(" ")
                        fout.write(str(int(line[1])-firstViewID))
                        fout.write("\n")
                    
                    countLine = 0
                    mode = 1
                    
                elif mode == 1:
                    
                    numMatch= int(line)
                    
                    if write:
                        # get number of matches and write out
                        fout.write(line + "\n")
                    
                    mode = 2
                    
                elif mode == 2:
                    
                    if write:
                        # write out matches
                        fout.write(line + "\n")
                    
                    countLine = countLine + 1
                    
                    if countLine == numMatch:
                        mode = 0
                        
        os.rename(os.path.join(matchDir,matchFileName),os.path.join(matchDir,matchFileName+"_old"))        
        os.rename(os.path.join(matchDir,matchFileNameTmp),os.path.join(matchDir,matchFileName))        
    
    return unusedImgName
def saveFinalSfM(projectDir):
    """Select the largest reconstructed 3D model in the project and assemble
    it under Output/final with the same directory layout used before merging.

    Preference order: the merged models under Output/merge_result first; if
    none exists, the single-video model with the most poses. "Largest" means
    the model whose sfm_data.json has the most extrinsics (camera poses).
    Images, csv and feature/descriptor files are symlinked ("cp -s") rather
    than copied.
    """
    # prepare output directory
    finalOutputDir = os.path.join(projectDir,"Output","final")
    if not os.path.isdir(finalOutputDir):
        FileUtils.makedir(finalOutputDir)
    if not os.path.isdir(os.path.join(finalOutputDir,"Input")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Input"))
    if not os.path.isdir(os.path.join(finalOutputDir,"Input","inputImg")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Input","inputImg"))
    if not os.path.isdir(os.path.join(finalOutputDir,"Input","csv")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Input","csv"))        
    if not os.path.isdir(os.path.join(finalOutputDir,"Output")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Output"))
    if not os.path.isdir(os.path.join(finalOutputDir,"Output","matches")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Output","matches"))
    if not os.path.isdir(os.path.join(finalOutputDir,"Output","SfM")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Output","SfM"))
    if not os.path.isdir(os.path.join(finalOutputDir,"Output","SfM","reconstruction")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Output","SfM","reconstruction"))
    if not os.path.isdir(os.path.join(finalOutputDir,"Output","SfM","reconstruction","global")):
        FileUtils.makedir(os.path.join(finalOutputDir,"Output","SfM","reconstruction","global"))

    maxPoseNum = -1
    selectedSfmOutputDir = ''
    # select largest model from "Output/merge_result" at first
    sfmOutputDirs = sorted(os.listdir(os.path.join(projectDir,"Output","merge_result","Output","SfM","reconstruction")))
    for sfmOutputDir in sfmOutputDirs:
        sfmDataFile = os.path.join(projectDir,"Output","merge_result","Output","SfM","reconstruction",sfmOutputDir,"sfm_data.json")
        if not os.path.exists(sfmDataFile):
            continue
        with open(sfmDataFile) as fp:
            sfmData = json.load(fp)
            # number of extrinsics == number of camera poses in the model
            poseNum = len(sfmData["extrinsics"])
            if (poseNum > maxPoseNum):
                selectedSfmOutputDir = os.path.join(projectDir,"Output","merge_result","Output","SfM","reconstruction",sfmOutputDir)
                maxPoseNum = poseNum
    # select from single 3D model if merged 3D model does not exist
    if not selectedSfmOutputDir:
        outputDirs = sorted(os.listdir(os.path.join(projectDir,"Output")))
        for outputDir in outputDirs:
            outputDirPath = os.path.join(projectDir,"Output",outputDir)
            if not os.path.isdir(outputDirPath):
                continue
            sfmOutputDir = os.path.join(outputDirPath,"SfM","reconstruction","global")
            sfmDataFile = os.path.join(sfmOutputDir,"sfm_data.json")
            if not os.path.exists(sfmDataFile):
                continue
            with open(sfmDataFile) as fp:
                sfmData = json.load(fp)
                poseNum = len(sfmData["extrinsics"])
                if (poseNum > maxPoseNum):
                    selectedSfmOutputDir = sfmOutputDir
                    maxPoseNum = poseNum
        
    # create symbolic links to all images, csv, and descriptor/feature files
    os.system("cp --remove-destination -s " + os.path.join(projectDir,"Input","*","inputImg","*") + " " + os.path.join(finalOutputDir,"Input","inputImg"))
    os.system("cp --remove-destination -s " + os.path.join(projectDir,"Input","*","csv","*") + " " + os.path.join(finalOutputDir,"Input","csv"))
    os.system("cp --remove-destination -s " + os.path.join(projectDir,"Output","*","matches","*.desc") + " " + os.path.join(finalOutputDir,"Output","matches"))
    os.system("cp --remove-destination -s " + os.path.join(projectDir,"Output","*","matches","*.feat") + " " + os.path.join(finalOutputDir,"Output","matches"))
    os.system("cp --remove-destination -s " + os.path.join(projectDir,"Output","*","matches","*.bow") + " " + os.path.join(finalOutputDir,"Output","matches"))
    
    # copy image_describer.txt
    # NOTE(review): uses the first video's describer settings — presumably
    # all videos share one feature-extraction configuration; verify
    listVideo = sorted(os.listdir(os.path.join(projectDir,"Input")))
    os.system("cp --remove-destination " + os.path.join(projectDir,"Output", listVideo[0], "matches", "image_describer.txt") + " " + os.path.join(finalOutputDir,"Output","matches"))
    
    # copy listbeacon.txt
    os.system("cp --remove-destination " + os.path.join(projectDir,"Input","listbeacon.txt") + " " + os.path.join(finalOutputDir,"Input"))
    
    # copy SfM result
    os.system("cp --remove-destination -s " + os.path.join(selectedSfmOutputDir,"sfm_data.json") + " " + os.path.join(finalOutputDir,"Output","SfM","reconstruction","global"))
    os.system("cp --remove-destination -s " + os.path.join(selectedSfmOutputDir,"colorized.ply") + " " + os.path.join(finalOutputDir,"Output","SfM","reconstruction","global"))    
    
    # copy PCAfile.yml and BOWfile.yml if exists
    if os.path.exists(os.path.join(projectDir,"Output","merge_result","Output","matches","PCAfile.yml")):
        os.system("cp --remove-destination " + os.path.join(projectDir,"Output","merge_result","Output","matches","PCAfile.yml") + " " + os.path.join(finalOutputDir,"Output","matches"))
    if os.path.exists(os.path.join(projectDir,"Output","merge_result","Output","matches","BOWfile.yml")):
        os.system("cp --remove-destination " + os.path.join(projectDir,"Output","merge_result","Output","matches","BOWfile.yml") + " " + os.path.join(finalOutputDir,"Output","matches"))
    
    # To create same directory structure before merging, create sfm_data.json without structure information in matches directory
    with open(os.path.join(os.path.join(selectedSfmOutputDir,"sfm_data.json"))) as fpr:
        sfmData = json.load(fpr)
        # strip pose/structure data, keeping only views and intrinsics
        sfmData["extrinsics"] = []
        sfmData["control_points"] = []
        sfmData["structure"] = []
        with open(os.path.join(finalOutputDir,"Output","matches","sfm_data.json"),"w") as fpw:
            json.dump(sfmData, fpw)
    
    # copy beacon.txt if exists
    if os.path.exists(os.path.join(selectedSfmOutputDir,"beacon.txt")):
        os.system("cp --remove-destination " + os.path.join(selectedSfmOutputDir,"beacon.txt") + " " + os.path.join(finalOutputDir,"Output","SfM","reconstruction","global"))
def cleanSfM(sfm_data_path, matchesFile):

    sfm_data = FileUtils.loadjson(sfm_data_path)
    if (len(sfm_data["views"]) == 0):
        print "No views are used in reconstruction of " + sfm_data_path
        return [[], []]
    if (len(sfm_data["extrinsics"]) == 0):
        print "No extrinsics are used in reconstruction of " + sfm_data_path
        return [[], []]

    # get map from ID to index
    viewMap = {}
    for i in range(0, len(sfm_data["views"])):
        viewMap[sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]
                ["id_view"]] = i

    extMap = {}
    for i in range(0, len(sfm_data["extrinsics"])):
        extMap[sfm_data["extrinsics"][i]["key"]] = i

    strMap = {}
    for i in range(0, len(sfm_data["structure"])):
        strMap[sfm_data["structure"][i]["key"]] = i

    # find viewIDs of first and last frame used in reconstruction
    firstViewID = len(sfm_data["views"])
    lastViewID = 0
    firstExtID = min(extMap.keys())

    for i in range(0, len(sfm_data["views"])):
        viewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_view"]
        extID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["id_pose"]

        if extID in extMap:
            if firstViewID > viewID:
                firstViewID = viewID
            if lastViewID < viewID:
                lastViewID = viewID

    if firstViewID >= lastViewID:
        print "No views are used in reconstruction of " + sfm_data_path
        return [[], []]

    # get list of unused view back to front
    # and change the view Index
    unusedImgName = [[], []]
    for i in range(len(sfm_data["views"]) - 1, lastViewID, -1):
        unusedImgName[1].append(
            sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])
        sfm_data["views"].pop(i)

    for i in range(lastViewID, firstViewID - 1, -1):
        newViewID = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_view"] - firstViewID
        sfm_data["views"][i]["key"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_view"] = newViewID
        sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
            "id_pose"] = sfm_data["views"][i]["value"]["ptr_wrapper"]["data"][
                "id_pose"] - firstExtID

    for i in range(firstViewID - 1, -1, -1):
        unusedImgName[0].append(
            sfm_data["views"][i]["value"]["ptr_wrapper"]["data"]["filename"])
        sfm_data["views"].pop(i)

    # change extrinsics ID
    for i in range(0, len(sfm_data["extrinsics"])):
        sfm_data["extrinsics"][i][
            "key"] = sfm_data["extrinsics"][i]["key"] - firstExtID

    # change index of refered view in structure
    for i in range(0, len(sfm_data["structure"])):
        for j in range(0,
                       len(sfm_data["structure"][i]["value"]["observations"])):
            sfm_data["structure"][i]["value"]["observations"][j]["key"] = \
                sfm_data["structure"][i]["value"]["observations"][j]["key"]-firstViewID

    # save jsonfile back
    FileUtils.savejson(sfm_data, sfm_data_path)

    # update matches file
    for matchfile in matchesFile:

        matchFileName = os.path.basename(matchfile)
        matchFileNameTmp = matchFileName.join(
            random.choice(string.lowercase) for i in range(10))  #random name
        matchDir = os.path.dirname(matchfile)

        fout = open(os.path.join(matchDir, matchFileNameTmp), "w")

        with open(matchfile, "r") as mfile:
            mode = 0
            write = False
            countLine = 0

            for line in mfile:

                line = line.strip()

                if mode == 0:

                    line = line.split(" ")

                    view1 = int(line[0])
                    view2 = int(line[1])

                    if view1 < firstViewID or view1 > lastViewID or \
                        view2 < firstViewID or view2 > lastViewID:
                        write = False
                    else:
                        write = True

                    if write:
                        # update viewID and write out
                        fout.write(str(int(line[0]) - firstViewID))
                        fout.write(" ")
                        fout.write(str(int(line[1]) - firstViewID))
                        fout.write("\n")

                    countLine = 0
                    mode = 1

                elif mode == 1:

                    numMatch = int(line)

                    if write:
                        # get number of matches and write out
                        fout.write(line + "\n")

                    mode = 2

                elif mode == 2:

                    if write:
                        # write out matches
                        fout.write(line + "\n")

                    countLine = countLine + 1

                    if countLine == numMatch:
                        mode = 0

        os.rename(os.path.join(matchDir, matchFileName),
                  os.path.join(matchDir, matchFileName + "_old"))
        os.rename(os.path.join(matchDir, matchFileNameTmp),
                  os.path.join(matchDir, matchFileName))

    return unusedImgName
    def mergeModel(self, listbeacon, image_descFile, inputPath, outputPath, reconParam=ReconstructParam, 
                   reconIBeaconParam=ReconstructIBeaconParam, reconBOWParam=ReconstructBOWParam, 
                   mergeCoocThresRat=0.25, mergeCoocThresFrame=30):
        """Iteratively merge the loaded SfM models into larger models.

        Builds an iBeacon co-occurrence graph between the models in
        self.sfmModel, repeatedly picks a base model that belongs to a
        connected component, and tries to merge its neighbors into it
        (smallest reconstruction first, via self.mergeOneModel).  Pairs
        that fail to merge are remembered in self.badMatches and skipped
        afterwards.  Returns when no mergable connected component remains.

        Arguments:
            listbeacon -- path to listbeacon.txt, copied into self.mInputPath
            image_descFile -- path to image_describer.txt, copied into
                self.mMatchesPath
            inputPath -- project input root; per-model inputImg/csv files
                are symlinked into the merge workspace
            outputPath -- project output root; per-model .desc/.feat/.bow
                files are symlinked into self.mMatchesPath
            reconParam, reconIBeaconParam, reconBOWParam -- parameter
                holders (defaults are project classes declared elsewhere
                in this file -- TODO confirm their attributes)
            mergeCoocThresRat, mergeCoocThresFrame -- two models become
                graph neighbors when their co-occurrence ratio exceeds
                mergeCoocThresRat OR their co-occurrence frame count
                exceeds mergeCoocThresFrame
        """
        print "Begin merging models"
        
        normBeaconApproach = reconIBeaconParam.normApproach
        
        FileUtils.makedir(self.mInputImgPath)
        FileUtils.makedir(self.mCsvPath)
        FileUtils.makedir(self.mMatchesPath)
        FileUtils.makedir(self.mSfMPath)
        
        # generate beacon.txt file for all models
        # we need generate every time to synchronize with sfm_data.json
        for video in self.sfmModel:
            print "Generating beacon.txt for " + video.name
            # write beacon file
            IBeaconUtils.exportBeaconDataForSfmImageFrames(video.csvFolLoc, video.sfm_dataLoc, 
                                                           listbeacon, video.beaconFileLoc, normBeaconApproach)
            
            # abort the whole merge if any beacon file could not be written
            if (not os.path.isfile(video.beaconFileLoc)):
                print("Error: beacon file %s is not created" % video.beaconFileLoc)
                return
        
        # create symbolic links to all images, csv, and descriptor/feature files
        os.system("cp --remove-destination -s " + os.path.join(inputPath,"*","inputImg","*") + " " + self.mInputImgPath)
        os.system("cp --remove-destination -s " + os.path.join(inputPath,"*","csv","*") + " " + self.mCsvPath)
        os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.desc") + " " + self.mMatchesPath)
        os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.feat") + " " + self.mMatchesPath)
        os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.bow") + " " + self.mMatchesPath)
        
        # copy listbeacon.txt and image_describer.txt
        os.system("cp --remove-destination " + listbeacon + " " + self.mInputPath)
        os.system("cp --remove-destination " + image_descFile + " " + self.mMatchesPath)
         
        listLead = range(0,len(self.sfmModel)) # indexes of models which can still initiate a merge
        listBye = [] # NOTE: despite the name, this holds model *names* (not indexes) that already failed to initiate a merge
        baseVideo = -1
        mergeCandidatesRemainsForBaseVideo = True
        calBeaconSim = False
        
        while True:
            # if the previous base video found no merge candidate,
            # exclude it from initiating future merges
            if not mergeCandidatesRemainsForBaseVideo:
                listBye.append(self.sfmModel[baseVideo].name)
            
            # recompute listLead from names since model indexes shift as models are deleted
            listName = [(x,self.sfmModel[x].name) for x in range(0,len(self.sfmModel))]
            listLead = [x[0] for x in listName if x[1] not in listBye]
            
            # if there was a merge, recalculate the cooccurence graph
            if mergeCandidatesRemainsForBaseVideo:
                # calculate cooccurence graph (only once; afterwards it is
                # updated incrementally after each successful merge)
                if not calBeaconSim:
                    beaconCoocRat, beaconCoocFrame = self.calCooccurenceGraph(coocThres=reconIBeaconParam.coocThres)
                    calBeaconSim = True
                    
                print "graph edges : " + str(beaconCoocRat)
                print "SfM model names : " + str([x.name for x in self.sfmModel])
                # two models are connected if either the ratio or the frame-count threshold is exceeded
                connectionGraph = np.logical_or(beaconCoocRat > mergeCoocThresRat,beaconCoocFrame > mergeCoocThresFrame)
                
                # calculate connected component on graph
                ccLabel = scipy.sparse.csgraph.connected_components(
                    connectionGraph,
                    directed=False)[1]
                        
            # if nore more mergable components
            # (every component is a singleton when #labels == #nodes)
            if len(np.unique(ccLabel)) == len(ccLabel):
                print "No more mergable components. Exiting."
                return
            
            # sort the length of reconstructed frames in each video 
            # from small to large to find the base Video
            reconFrameLenList = [len(self.sfmModel[i].reconFrame) for i in range(0,len(self.sfmModel))]
            reconFrameLenIdx = [x[0] for x in sorted(enumerate(reconFrameLenList), key=lambda y:y[1])]

            # find first base video that has a connected component
            baseVideo = ""
            for video in reconFrameLenIdx:
                if np.sum(ccLabel==ccLabel[video]) > 1 and video in listLead:
                    baseVideo = video
                    break
                
            # this should never be called since program should exit 
            # if there is no connected components in grap 
            if baseVideo == "":
                print "Cannot find connected component to merge. Exiting."
                return

            # get videos that connect to this baseVideo
            # and sort the from smallest to largest as merge order
            neighborVec = np.where(connectionGraph[baseVideo,:])[0]
            neighborVec = neighborVec[neighborVec!=baseVideo] # prevent selecting itself to merge
            mergeCandidate = neighborVec.tolist()
            nReconFrameMergeCand = [len(self.sfmModel[x].reconFrame) for x in mergeCandidate]
            orderMergeCand = [x[0] for x in sorted(enumerate(nReconFrameMergeCand), key=lambda y:y[1])]
            mergeCandidateModel = [self.sfmModel[mergeCandidate[i]] for i in orderMergeCand]

            mergedModel = self.sfmModel[baseVideo]
            
            print "Based model: " + mergedModel.name
            print "To merge with: " + str([x.name for x in mergeCandidateModel])
            mergeCandidatesRemainsForBaseVideo = False            
            for video in mergeCandidateModel:
                
                # check if failed localization has been performed on this pair before
                # if so, skip this localization
                if self.isBadMatch(video,mergedModel):
                    continue
                
                # swap order so small model is merged to larger model
                swap = False
                if len(mergedModel.reconFrame) < len(video.reconFrame):
                    tmp = mergedModel
                    mergedModel = video
                    video = tmp
                    swap = True
                
                # attempt merge
                mergeResult, mergedModelTmp = self.mergeOneModel(mergedModel,video,reconParam,reconIBeaconParam,reconBOWParam)
                
                if mergeResult:
                    # merge succeeded: absorb the smaller model and remove it from the list
                    mergedModel.update(mergedModelTmp)
                    videoIdx = self.sfmModel.index(video)
                    del self.sfmModel[videoIdx]
                    
                    # update beacon co-occurrence matrices: drop the merged
                    # model's row/column, then recompute the merged model's edges
                    beaconCoocRat = np.delete(beaconCoocRat,videoIdx,0)
                    beaconCoocRat = np.delete(beaconCoocRat,videoIdx,1)
                    beaconCoocFrame = np.delete(beaconCoocFrame,videoIdx,0)
                    beaconCoocFrame = np.delete(beaconCoocFrame,videoIdx,1)
                    beaconCoocRat, beaconCoocFrame = self.updateCooccurenceGraph(beaconCoocRat, beaconCoocFrame, [self.sfmModel.index(mergedModel)], coocThres=reconIBeaconParam.coocThres)
                    
                    self.nMergedModel = self.nMergedModel+1
                    self.save(os.path.join(self.mSfMPath,"global" + str(self.nMergedModel-1),"mergeGraph.txt"))
                    self.save(os.path.join(self.mSfMPath,"mergeGraph.txt"))
                    mergeCandidatesRemainsForBaseVideo = True
                    
                    # reset listBye to allow small model to merge to new large model
                    listBye = []
                    
                    # write result log file
                    with open(os.path.join(self.mSfMPath,"logRecon.txt"),"a") as outLogFile:
                        outLogFile.write(str(self.nMergedModel-1) + " " + mergedModel.name + "\n")
                    
                    # start again
                    break
                else:
                    # add to bad matches
                    self.badMatches.append([video.name,mergedModel.name])
                
                    # save
                    self.save(os.path.join(self.mSfMPath,"mergeGraph.txt"))
                
                    if swap:
                        # swap back if not merged
                        mergedModel = video
def main():
    """Compute the model-to-world transform and run localization tests.

    Workflow (driven entirely by command-line arguments):
      1. If <project_dir>/Ref/Amat.txt does not exist: localize the images
         in Ref/inputImg against the SfM model (optionally accelerated by
         BOW and/or iBeacon), match them to the world coordinates listed
         in Ref/refcoor.txt, estimate an affine transform with RANSAC, and
         save it as Amat.txt/Amat.yml.  Otherwise reload the saved matrix.
      2. Export the globally-transformed sfm_data and colorized ply files.
      3. For each test folder (when --test-project-dir is given), localize
         its images, convert each result to world coordinates with Amat,
         and write a combined json plus a ply with the localized points.

    NOTE(review): relies on module-level objects (reconstructParam,
    localizeParam, localizeBOWParam, localizeIBeaconParam,
    reconstructIBeaconParam, mergeSfM, FileUtils, SfmDataUtils, PlyUtis)
    defined elsewhere in this file -- confirm they are in scope.
    """
    description = 'This script is for calcularing the matrix for converting 3D model to world coordinate and testing localization.' + \
        'Before running this script, please prepare the text file which has image names and 3D coordinate where photos are taken in Ref folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG project is located.')
    parser.add_argument('matches_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG created matches files.')
    parser.add_argument('sfm_data_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG sfm_data.json is located.')
    parser.add_argument('-t', '--test-project-dir', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where localization test image files are located.')
    parser.add_argument('-o', '--output-json-filename', action='store', nargs='?', const=None, \
                        default='loc_global.json', type=str, choices=None, metavar=None, \
                        help='Output localization result json filename.')
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate localization if this flag is set (default: False)')    
    parser.add_argument('--beacon', action='store_true', default=False, \
                        help='Use iBeacon to accelerate localization if this flag is set (default: False)')    
    args = parser.parse_args()
    project_dir = args.project_dir
    matches_dir = args.matches_dir
    sfm_data_dir = args.sfm_data_dir
    test_project_dir = args.test_project_dir
    output_json_filename = args.output_json_filename
    USE_BOW = args.bow
    USE_BEACON = args.beacon
    
    BOW_FILE = os.path.join(matches_dir, "BOWfile.yml")
    PCA_FILE = os.path.join(matches_dir, "PCAfile.yml")
    SFM_BEACON_FILE = sfm_data_dir + "/beacon.txt"
    REF_FOLDER = project_dir + "/Ref"
    
    # sanity-check that the requested acceleration data actually exists
    if USE_BOW and not os.path.isfile(BOW_FILE):
        print "Use BOW flag is set, but cannot find BOW model file"
        sys.exit()
    if USE_BEACON and not os.path.isfile(SFM_BEACON_FILE):
        print "Use iBeacon flag is set, but cannot find beacon signal file for SfM data"
        sys.exit()
    
    if not os.path.isfile(os.path.join(REF_FOLDER,"Amat.txt")):
        
        # 1. find transformation between reconstructed coordinate and world coordinate
        
        # 1.1 localize reference images
        REF_FOLDER_LOC = os.path.join(REF_FOLDER,"loc")
        if os.path.isdir(REF_FOLDER_LOC):
            shutil.rmtree(REF_FOLDER_LOC)
        os.mkdir(REF_FOLDER_LOC)
        
        # pick the localizer invocation matching the BOW/beacon flag combination
        if USE_BOW and not USE_BEACON:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -k=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE)
        elif not USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(REF_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach))
        elif USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(REF_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach) + \
                      " -kb=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE)
        else:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound))
        
        # extract centers from all json file and write to a file
        fileLoc = open(os.path.join(REF_FOLDER_LOC,"center.txt"),"w")
        countLocFrame = 0
        
        for filename in sorted(os.listdir(REF_FOLDER_LOC)):
            if filename[-4:]!="json":
                continue
                                    
            countLocFrame = countLocFrame + 1
            with open(os.path.join(REF_FOLDER_LOC,filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                # write each center as a red point (x y z r g b)
                fileLoc.write(str(loc[0]) + " "  + str(loc[1]) + " "  +str(loc[2]) + " 255 0 0\n" )  
        
        fileLoc.close() 
        
        # read reference data
        mapNameLocRef = FileUtils.loadImageLocationListTxt(os.path.join(REF_FOLDER,"refcoor.txt"))
        
        # read localized json file and find its matching world coordinate
        worldCoor = []
        locCoor = []
        countLoc = 0
        for filename in os.listdir(REF_FOLDER_LOC):
            if filename[-4:] != "json":
                continue
            
            # read json localization file
            with open(os.path.join(REF_FOLDER_LOC,filename)) as jsonlocfile:
                jsonLoc = json.load(jsonlocfile)
                
                imgLocName = os.path.basename(jsonLoc["filename"])
                
                # if file exist in map, add to matrix
                if imgLocName in mapNameLocRef:
                    locCoor.append(jsonLoc["t"])
                    worldCoor.append(mapNameLocRef[imgLocName])
                    countLoc = countLoc + 1
        
        print "From " + str(len(mapNameLocRef)) + " reference images, " + str(countLoc) + " images has been localized."
        
        # an affine transform needs at least 4 point correspondences
        if countLoc < 4:
            print "Cannot fix to world coordinate because of less than 4 reference points"
            return
        
        # find tranformation
        Amat, inliers = mergeSfM.ransacAffineTransform(np.array(worldCoor).T, np.array(locCoor).T, 
                                                       reconstructParam.ransacThresTransformWorldCoordinateRefImage, ransacRound=1000)
        
        if len(inliers) < 4:
            print "Cannot estimate transformation matrix to world coordinate"
            print Amat
            return
        
        # NOTE(review): message is missing a space before "inliers"
        print "Transformation matrix has " + str(len(inliers)) + "inliers"
        print Amat
        
        with open(os.path.join(REF_FOLDER,"Amat.txt"),"w") as AmatFile:
            np.savetxt(AmatFile,Amat)
        FileUtils.convertNumpyMatTxt2OpenCvMatYml(os.path.join(REF_FOLDER,"Amat.txt"), os.path.join(REF_FOLDER,"Amat.yml"), "A")
    else:
        # transform was computed on a previous run; just reload it
        with open(os.path.join(REF_FOLDER,"Amat.txt"),"r") as AmatFile:
            Amat = np.loadtxt(AmatFile)
    
    # convert ply file to world coordinate
    SfmDataUtils.saveGlobalSfM(os.path.join(sfm_data_dir,"sfm_data.json"), os.path.join(REF_FOLDER,"Amat.txt"), os.path.join(sfm_data_dir,"sfm_data_global.json"))
    os.system("openMVG_main_ComputeSfM_DataColor -i " + os.path.join(sfm_data_dir,"sfm_data_global.json") + " -o " + os.path.join(sfm_data_dir,"colorized_global.ply"))   
    PlyUtis.saveCameraPly(os.path.join(sfm_data_dir,"sfm_data_global.json"), os.path.join(sfm_data_dir,"colorized_global_camera.ply"))
    PlyUtis.saveStructurePly(os.path.join(sfm_data_dir,"sfm_data_global.json"), os.path.join(sfm_data_dir,"colorized_global_structure.ply"))
    
    # start localize test
    if test_project_dir:
        for testFolder in sorted(os.listdir(test_project_dir)):
            TEST_DIR = os.path.join(test_project_dir,testFolder)
            
            # skip folders that do not contain test images
            if not os.path.exists(os.path.join(TEST_DIR,"inputImg")):
                continue
            
            # localize only if this folder was not localized on a previous run
            TEST_FOLDER_LOC = os.path.join(TEST_DIR,"loc")
            if not os.path.isfile(os.path.join(TEST_FOLDER_LOC,"center.txt")):
                
                # localize test images
                if os.path.isdir(TEST_FOLDER_LOC):
                    shutil.rmtree(TEST_FOLDER_LOC)
                os.mkdir(TEST_FOLDER_LOC)
                
                # same BOW/beacon flag dispatch as for the reference images above
                if USE_BOW and not USE_BEACON:
                    os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -k=" + str(localizeBOWParam.locKNNnum) + \
                              " -a=" + BOW_FILE + \
                              " -p=" + PCA_FILE)
                elif not USE_BOW and USE_BEACON:
                    os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -b=" + SFM_BEACON_FILE + \
                              " -e=" + os.path.join(TEST_DIR,"csv") + \
                              " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                              " -c=" + str(localizeIBeaconParam.coocThres) + \
                              " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                              " -n=" + str(localizeIBeaconParam.normApproach))
                elif USE_BOW and USE_BEACON:
                    os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -b=" + SFM_BEACON_FILE + \
                              " -e=" + os.path.join(TEST_DIR,"csv") + \
                              " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                              " -c=" + str(localizeIBeaconParam.coocThres) + \
                              " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                              " -n=" + str(localizeIBeaconParam.normApproach) + \
                              " -kb=" + str(localizeBOWParam.locKNNnum) + \
                              " -a=" + BOW_FILE + \
                              " -p=" + PCA_FILE)
                else:
                    os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound))
                
                # extract centers from all json file and write to a file
                fileLoc = open(os.path.join(TEST_FOLDER_LOC,"center.txt"),"w")
                countLocFrame = 0
                
                for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
                    if filename[-4:]!="json":
                        continue
                    
                    countLocFrame = countLocFrame + 1
                    with open(os.path.join(TEST_FOLDER_LOC,filename)) as locJson:
                        #print os.path.join(sfm_locOut,filename)
                        locJsonDict = json.load(locJson)
                        loc = locJsonDict["t"]
                        fileLoc.write(str(loc[0]) + " "  + str(loc[1]) + " "  +str(loc[2]) + " 255 0 0\n" )
                
                fileLoc.close()
            
            # convert all localization results to world coordinate and merge to one json file
            locGlobalJsonObj = {}
            locGlobalJsonObj["locGlobal"] = []
            locGlobalPoints = []
            for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
                if filename[-4:]!="json":
                    continue
                with open(os.path.join(TEST_FOLDER_LOC,filename)) as jsonfile:
                    jsonLoc = json.load(jsonfile)
                    
                    # keep the model-coordinate pose, then overwrite t/R with
                    # the world-coordinate pose obtained through Amat
                    jsonLoc["t_relative"] = jsonLoc["t"]
                    jsonLoc["R_relative"] = jsonLoc["R"]
                    jsonLoc["t"] = np.dot(Amat,np.concatenate([jsonLoc["t"],[1]])).tolist()
                    jsonLoc["R"] = np.dot(Amat[:, 0:3],jsonLoc["R"]).tolist()
                    locGlobalJsonObj["locGlobal"].append(jsonLoc)
                    
                    locGlobalPoints.append(jsonLoc["t"])
            with open(os.path.join(TEST_FOLDER_LOC, output_json_filename),"w") as jsonfile:
                json.dump(locGlobalJsonObj, jsonfile)
            
            # save localization results to ply file
            PlyUtis.addPointToPly(os.path.join(sfm_data_dir,"colorized_global_structure.ply"), locGlobalPoints, 
                                  os.path.join(TEST_FOLDER_LOC,"colorized_global_localize.ply"))
Example #35
0
    def mergeOneModel(self, model1, model2, reconParam, reconBOWParam):

        sfmOutPath = os.path.join(self.mSfMPath,
                                  "global" + str(self.nMergedModel))

        # modified by T. IShihara 2016.06.14
        # fix file name too long issue
        #
        # create a temporary folder for reconstructed image of model2
        #inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmp"+model2.name)
        inputImgTmpFolder = os.path.join(self.mSfMPath, "inputImgTmp",
                                         "inputImgTmpModel2")
        if os.path.isdir(inputImgTmpFolder):
            FileUtils.removedir(inputImgTmpFolder)

        # copy reconstructed image fom model2 to tmp folder
        sfm_data2 = FileUtils.loadjson(model2.sfm_dataLoc)
        if not os.path.isdir(inputImgTmpFolder):
            listReconFrameName = [
                sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]
                ["filename"] for x in range(0, len(sfm_data2["views"]))
                if sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]
                ["id_view"] in model2.reconFrame
            ]
            FileUtils.makedir(inputImgTmpFolder)
            for reconFrameName in listReconFrameName:
                os.system("cp -s " +
                          os.path.join(model2.imgFolLoc, reconFrameName) +
                          " " + inputImgTmpFolder)

        # remove all old localization result
        FileUtils.removedir(model2.locFolLoc)
        FileUtils.makedir(model2.locFolLoc)

        # localize the images from model2 on model1
        guideMatchOption = ""
        if reconParam.bGuidedMatchingLocalize:
            guideMatchOption = " -gm"
        os.system(reconParam.LOCALIZE_PROJECT_PATH + \
                  " " + inputImgTmpFolder + \
                  " " + os.path.dirname(model1.sfm_dataLoc) + \
                  " " + self.mMatchesPath + \
                  " " + model2.locFolLoc + \
                  " -f=" + str(reconParam.locFeatDistRatio) + \
                  " -r=" + str(reconParam.locRansacRound) + \
                  " -i=" + str(reconParam.locSkipFrame) + \
                  " -k=" + str(reconBOWParam.locKNNnum) + \
                  " -a=" + os.path.join(self.mMatchesPath, "BOWfile.yml") + \
                  " -p=" + os.path.join(self.mMatchesPath, "PCAfile.yml") + \
                  guideMatchOption)

        # remove temporary image folder
        # removedir(inputImgTmpFolder)

        # extract centers from all json file and write to a file
        fileLoc = open(os.path.join(model2.locFolLoc, "center.txt"), "w")
        countLocFrame = 0
        for filename in sorted(os.listdir(model2.locFolLoc)):
            if filename[-4:] != "json":
                continue

            countLocFrame = countLocFrame + 1
            with open(os.path.join(model2.locFolLoc, filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(
                    str(loc[0]) + " " + str(loc[1]) + " " + str(loc[2]) +
                    " 255 0 0\n")
        fileLoc.close()

        # get inlier matches
        FileUtils.makedir(sfmOutPath)
        resultSfMDataFile = os.path.join(sfmOutPath, "sfm_data.json")
        # below also checks if the ratio between first and last svd of M[0:3,0:3]
        # is good or not. If not then reject
        # TODO : revisit ransacRound parameter, use number of reconstruction frame to determine structure points transform seems small
        nMatchPointsTmp, nInlierTmp, M = mergeSfM.mergeModel(
            model1.sfm_dataLoc,
            model2.sfm_dataLoc,
            model2.locFolLoc,
            resultSfMDataFile,
            ransacThres=model1.ransacStructureThres,
            mergePointThres=model1.mergeStructureThres,
            ransacRoundMul=reconParam.ransacRoundMul,
            inputImgDir=self.mInputImgPath,
            minLimit=reconParam.min3DnInliers)

        ratioInlierMatchPoints = 0.0
        if nMatchPointsTmp > 0:
            ratioInlierMatchPoints = float(nInlierTmp) / nMatchPointsTmp

        # 3. perform test whether merge is good
        sfm_merge_generated = True
        countFileAgree = 0
        countFileLoc = 1
        if os.path.isfile(resultSfMDataFile):
            os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " +
                      resultSfMDataFile + " " + resultSfMDataFile)
            countFileLoc, countFileAgree = mergeSfM.modelMergeCheckLocal(
                resultSfMDataFile, model2.locFolLoc,
                model1.validMergeRansacThres)
        else:
            sfm_merge_generated = False

        ratioAgreeFrameReconFrame = 0.0
        if (len(model2.reconFrame) > 0):
            ratioAgreeFrameReconFrame = float(countFileAgree) / len(
                model2.reconFrame)
        ratioAgreeFrameLocFrame = 0.0
        if (countFileLoc > 0):
            ratioAgreeFrameLocFrame = float(countFileAgree) / countFileLoc

        # write log file
        with open(
                os.path.join(self.mSfMPath, "global" + str(self.nMergedModel),
                             "log.txt"), "a") as filelog:
            filelog.write(("M1: " + model1.name + "\n" + \
                          "M2: " + model2.name + "\n" + \
                          "nMatchedPoints: " + str(nMatchPointsTmp) + "\n" + \
                          "nInliers: " + str(nInlierTmp) + "\n" + \
                          "ratioInlierWithMatchedPoints: " + str(ratioInlierMatchPoints) + "\n" + \
                          "countLocFrame: " + str(countLocFrame) + "\n" + \
                          "nReconFrame M2: " + str(len(model2.reconFrame)) + "\n" + \
                          "countFileAgree: " + str(countFileAgree) + "\n" + \
                          "countFileLoc: " + str(countFileLoc) + "\n" + \
                          "not sfm_merge_generated: " + str(not sfm_merge_generated) + "\n" + \
                          # obsolete condition by T. Ishihara 2015.11.10
                          #"nInlierTmp > "+str(reconParam.vldMergeRatioInliersFileagree)+"*countFileAgree: " + str(nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree) + "\n" + \
                          "countFileAgree > "+str(reconParam.vldMergeMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileAgree > "+str(reconParam.vldMergeSmallMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeSmallMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileLoc < countFileAgree*" +str(reconParam.vldMergeShortRatio)+ ": " + str(countFileLoc < countFileAgree*reconParam.vldMergeShortRatio) + "\n" + \
                          "ratioLocAgreeWithReconFrame: " + str(ratioAgreeFrameReconFrame) + "\n" + \
                          "ratioLocAgreeWithReconFrame > " + str(reconParam.vldMergeRatioAgrFReconF) + ": " + str(ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) + "\n" + \
                          "ratioLocAgreeWithLocFrame: " + str(ratioAgreeFrameLocFrame) + "\n" + \
                          "ratioLocAgreeWithLocFrame > " + str(reconParam.vldMergeRatioAgrFLocF) + ": " + str(ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF) + "\n" + \
                          str(M) + "\n\n"))

        # rename the localization folder to save localization result
        '''
        if os.path.isdir(model2.locFolLoc+model1.name):
            FileUtils.removedir(model2.locFolLoc+model1.name)
        os.rename(model2.locFolLoc,model2.locFolLoc+model1.name)
        '''

        # obsolete merge condition
        '''
        if not sfm_merge_generated or \
            not (nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree and \
            ((countFileAgree > reconParam.vldMergeMinCountFileAgree or (countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and countFileLoc < countFileAgree*reconParam.vldMergeShortRatio)) and \
            ((nInlierTmp > reconParam.vldMergeNInliers and float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconFNInliers) or float(countFileAgree)/countFileLoc > reconParam.vldMergeRatioAgrFLocF) and
            (float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconF))):
        '''
        # update merge condition by T. Ishihara 2015.11.10
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and \
                 countFileLoc < countFileAgree*reconParam.vldMergeShortRatio and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.04.02
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.06.09
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
                 nInlierTmp > reconParam.min3DnInliers and \
                 ratioInlierMatchPoints > reconParam.vldMergeRatioInliersMatchPoints):
        '''
        # update merge condition by T. Ishihara 2016.06.20
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
                 nInlierTmp > reconParam.min3DnInliers):
            print "Transformed locations do not agree with localization. Skip merge between " + model1.name + " and " + model2.name + "."
            '''
            if os.path.isfile(os.path.join(sfmOutPath,"sfm_data.json")):
                os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                          os.path.join(sfmOutPath,"sfm_data_("+model1.name + "," + model2.name+").json"))
            '''
            if os.path.isfile(os.path.join(sfmOutPath, "sfm_data.json")):
                os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                          os.path.join(sfmOutPath,"sfm_data_fail_merge.json"))

            # move to next video
            return False, sfmModelBOW("",
                                      "",
                                      "",
                                      "",
                                      "",
                                      "",
                                      validMergeRansacThres=0,
                                      validMergeRansacThresK=0,
                                      ransacStructureThres=0,
                                      ransacStructureThresK=0,
                                      mergeStructureThres=0,
                                      mergeStructureThresK=0)

        # generate colorized before bundle adjustment for comparison
        os.system("openMVG_main_ComputeSfM_DataColor " + " -i " +
                  os.path.join(sfmOutPath, "sfm_data.json") + " -o " +
                  os.path.join(sfmOutPath, "colorized_pre.ply"))

        # TODO : try computing structure from know pose here
        # https://github.com/openMVG/openMVG/issues/246
        # http://openmvg.readthedocs.io/en/latest/software/SfM/ComputeStructureFromKnownPoses/

        # TODO : revisit the order of bundle adjustment
        # perform bundle adjustment
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rs,rst,rsti" + " -r=" + "1")
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rst,rsti" + " -r=" + "1")

        os.system("openMVG_main_ComputeSfM_DataColor " + " -i " +
                  os.path.join(sfmOutPath, "sfm_data.json") + " -o " +
                  os.path.join(sfmOutPath, "colorized.ply"))

        return True, sfmModelBOW(
            "A" + model1.name + "," + model2.name + "Z",
            self.mInputImgPath,
            self.mCsvPath,
            self.mMatchesPath,
            os.path.join(sfmOutPath, "loc"),
            resultSfMDataFile,
            validMergeRansacThres=model1.validMergeRansacThres,
            ransacStructureThres=model1.ransacStructureThres,
            mergeStructureThres=model1.mergeStructureThres)
def main():
    """Align an OpenMVG reconstruction to world coordinates and test localization.

    Workflow:
      1. Estimate (or load a cached) 3x4 affine matrix ``Amat`` mapping the SfM
         reconstruction frame to world coordinates, using reference 3D points
         annotated in ``<project_dir>/Ref/refpoints.json``.
      2. Optionally reduce near-duplicate 3D points (``--reduce-points``).
      3. Export the globally aligned model as json/ply files.
      4. For each test folder (``-t``), localize its images (optionally
         accelerated by BOW and/or iBeacon), transform the results to world
         coordinates, and write ``loc_global.json`` plus visualization ply files.

    Exits early (plain ``return`` / ``sys.exit``) when prerequisites are missing
    or fewer than 4 reference points / inliers are available.
    """
    # NOTE: fixed typo "calcularing" -> "calculating" in the user-visible help text.
    description = 'This script is for calculating the matrix for converting 3D model to world coordinate and testing localization.' + \
        'Before running this script, please prepare the json file which has 3D coordinate for reference points in Ref folder.' + \
        'You can create reference points json file by sfmCoordinateEditor'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG project is located.')
    parser.add_argument('matches_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG created matches files.')
    parser.add_argument('sfm_data_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG sfm_data.json is located.')
    parser.add_argument('-t', '--test-project-dir', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where localization test image files are located.')
    parser.add_argument('-o', '--output-json-filename', action='store', nargs='?', const=None, \
                        default='loc_global.json', type=str, choices=None, metavar=None, \
                        help='Output localization result json filename.')
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate localization if this flag is set (default: False)')
    parser.add_argument('--beacon', action='store_true', default=False, \
                        help='Use iBeacon to accelerate localization if this flag is set (default: False)')
    parser.add_argument('--reduce-points', action='store_true', default=False, \
                        help='Reduce 3D points if points are close after transforming to global coordinate (default: False)')
    args = parser.parse_args()
    project_dir = args.project_dir
    matches_dir = args.matches_dir
    sfm_data_dir = args.sfm_data_dir
    test_project_dir = args.test_project_dir
    output_json_filename = args.output_json_filename
    USE_BOW = args.bow
    USE_BEACON = args.beacon
    USE_REDUCE_POINTS = args.reduce_points
    
    # well-known file/folder locations inside the project
    BOW_FILE = os.path.join(matches_dir, "BOWfile.yml")
    PCA_FILE = os.path.join(matches_dir, "PCAfile.yml")
    SFM_BEACON_FILE = sfm_data_dir + "/beacon.txt"
    REF_FOLDER = project_dir + "/Ref"
    
    # validate that acceleration flags have their required data files
    if USE_BOW and not os.path.isfile(BOW_FILE):
        print "Use BOW flag is set, but cannot find BOW model file"
        sys.exit()
    if USE_BEACON and not os.path.isfile(SFM_BEACON_FILE):
        print "Use iBeacon flag is set, but cannot find beacon signal file for SfM data"
        sys.exit()
    
    if not os.path.isfile(os.path.join(REF_FOLDER,"Amat.txt")):
        # 1. find transformation between reconstructed coordinate and world coordinate
        refPoints = FileUtils.loadRefPointsJson(os.path.join(REF_FOLDER,"refpoints.json"))
        
        # read localized json file and find its matching world coordinate
        worldCoor = []
        locCoor = []
        with open(os.path.join(sfm_data_dir, "sfm_data.json")) as fp:
            sfmData = json.load(fp)
            for pointId in refPoints:
                # find the structure point whose key matches the annotated reference id
                selectedPoint = [point for point in sfmData["structure"] if point["key"]==pointId][0]
                print "relative coordinate : " + str(selectedPoint["value"]["X"])
                print "world coordinate : " + str(refPoints[pointId])
                locCoor.append(selectedPoint["value"]["X"])
                worldCoor.append(refPoints[pointId])
                
        print "Number of reference points : " + str(len(worldCoor))
        
        # an affine transform needs at least 4 non-degenerate correspondences
        if len(worldCoor) < 4:
            print "Cannot fix to world coordinate because of less than 4 reference points"
            return
        
        # find tranformation (RANSAC over the point correspondences)
        Amat, inliers = mergeSfM.ransacTransform(np.array(worldCoor).T, np.array(locCoor).T, 
                                                 reconstructParam.ransacThresTransformWorldCoordinateRefPoint, ransacRound=1000)
        
        if len(inliers) < 4:
            print "Cannot estimate transformation matrix to world coordinate"
            print Amat
            return
        
        # NOTE: added the missing space before "inliers" in the status message
        print "Transformation matrix has " + str(len(inliers)) + " inliers"
        print Amat
        
        # cache the matrix for future runs, in both numpy txt and OpenCV yml form
        with open(os.path.join(REF_FOLDER,"Amat.txt"),"w") as AmatFile:
            np.savetxt(AmatFile,Amat)
        FileUtils.convertNumpyMatTxt2OpenCvMatYml(os.path.join(REF_FOLDER,"Amat.txt"), os.path.join(REF_FOLDER,"Amat.yml"), "A")
    else:
        with open(os.path.join(REF_FOLDER,"Amat.txt"),"r") as AmatFile:
            Amat = np.loadtxt(AmatFile)
    
    if USE_REDUCE_POINTS:
        print "start reducing 3D points..."
        
        with open(os.path.join(sfm_data_dir, "sfm_data.json")) as fp:
            sfmData = json.load(fp)
        
        with open(os.path.join(REF_FOLDER,"Amat.txt"),"r") as AmatFile:
            Amat = np.loadtxt(AmatFile)
        
        print "point size before reducing : " + str(len(sfmData['structure']))
        # TODO : revisit the threshold for reducing points
        #reduceClosePoints(sfmData, Amat, 0.01)
        reduceClosePointsKDTree(sfmData, Amat, 0.01, 1000)
        print "point size after reducing : " + str(len(sfmData['structure']))
        
        print "start save reduced sfm data..."
        # keep a backup of the original sfm_data before overwriting with the reduced one
        os.system("cp " + os.path.join(sfm_data_dir, "sfm_data.json") + " " + os.path.join(sfm_data_dir, "sfm_data_b4rp.json"))
        with open(os.path.join(sfm_data_dir, "sfm_data.json"), "w") as fp:
            json.dump(sfmData, fp)
        print "finish save reduced sfm data..."
        
        print "finish reducing 3D points."
    
    # convert ply file to world coordinate
    SfmDataUtils.saveGlobalSfM(os.path.join(sfm_data_dir,"sfm_data.json"), os.path.join(REF_FOLDER,"Amat.txt"), os.path.join(sfm_data_dir,"sfm_data_global.json"))
    os.system("openMVG_main_ComputeSfM_DataColor -i " + os.path.join(sfm_data_dir,"sfm_data_global.json") + " -o " + os.path.join(sfm_data_dir,"colorized_global.ply"))   
    PlyUtis.saveCameraPly(os.path.join(sfm_data_dir,"sfm_data_global.json"), os.path.join(sfm_data_dir,"colorized_global_camera.ply"))
    PlyUtis.saveStructurePly(os.path.join(sfm_data_dir,"sfm_data_global.json"), os.path.join(sfm_data_dir,"colorized_global_structure.ply"))
    
    # start localize test
    if test_project_dir:
        for testFolder in sorted(os.listdir(test_project_dir)):
            TEST_DIR = os.path.join(test_project_dir,testFolder)
            
            # skip folders without input images
            if not os.path.exists(os.path.join(TEST_DIR,"inputImg")):
                continue
            
            TEST_FOLDER_LOC = os.path.join(TEST_DIR,"loc")
            # center.txt acts as a "done" marker; only localize when it is absent
            if not os.path.isfile(os.path.join(TEST_FOLDER_LOC,"center.txt")):
                
                # localize test images (remove any stale partial results first)
                if os.path.isdir(TEST_FOLDER_LOC):
                    shutil.rmtree(TEST_FOLDER_LOC)
                os.mkdir(TEST_FOLDER_LOC)
                
                # choose the localizer invocation matching the acceleration flags
                if USE_BOW and not USE_BEACON:
                    os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -k=" + str(localizeBOWParam.locKNNnum) + \
                              " -a=" + BOW_FILE + \
                              " -p=" + PCA_FILE)
                elif not USE_BOW and USE_BEACON:
                    os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -b=" + SFM_BEACON_FILE + \
                              " -e=" + os.path.join(TEST_DIR,"csv") + \
                              " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                              " -c=" + str(localizeIBeaconParam.coocThres) + \
                              " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                              " -n=" + str(localizeIBeaconParam.normApproach))                
                elif USE_BOW and USE_BEACON:
                    os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound) + \
                              " -b=" + SFM_BEACON_FILE + \
                              " -e=" + os.path.join(TEST_DIR,"csv") + \
                              " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                              " -c=" + str(localizeIBeaconParam.coocThres) + \
                              " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                              " -n=" + str(localizeIBeaconParam.normApproach) + \
                              " -kb=" + str(localizeBOWParam.locKNNnum) + \
                              " -a=" + BOW_FILE + \
                              " -p=" + PCA_FILE)
                else:
                    os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                              " " + os.path.join(TEST_DIR,"inputImg") + \
                              " " + sfm_data_dir + \
                              " " + matches_dir + \
                              " " + TEST_FOLDER_LOC + \
                              " -f=" + str(localizeParam.locFeatDistRatio) + \
                              " -r=" + str(localizeParam.locRansacRound))
                
                # extract centers from all json file and write to a file
                # (context manager guarantees center.txt is closed even if a
                #  localization json fails to parse)
                countLocFrame = 0
                with open(os.path.join(TEST_FOLDER_LOC,"center.txt"),"w") as fileLoc:
                    for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
                        if filename[-4:]!="json":
                            continue
                        
                        countLocFrame = countLocFrame + 1
                        with open(os.path.join(TEST_FOLDER_LOC,filename)) as locJson:
                            #print os.path.join(sfm_locOut,filename)
                            locJsonDict = json.load(locJson)
                            loc = locJsonDict["t"]
                            fileLoc.write(str(loc[0]) + " "  + str(loc[1]) + " "  +str(loc[2]) + " 255 0 0\n" )
            
            # convert all localization results to world coordinate and merge to one json file
            locGlobalJsonObj = {}
            locGlobalJsonObj["locGlobal"] = []
            locGlobalPoints = []
            for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
                if filename[-4:]!="json":
                    continue
                with open(os.path.join(TEST_FOLDER_LOC,filename)) as jsonfile:
                    jsonLoc = json.load(jsonfile)
                    
                    # keep the reconstruction-frame pose, then overwrite t/R with
                    # the world-coordinate pose (t homogenized, R rotated by Amat's
                    # 3x3 part)
                    jsonLoc["t_relative"] = jsonLoc["t"]
                    jsonLoc["R_relative"] = jsonLoc["R"]
                    jsonLoc["t"] = np.dot(Amat,np.concatenate([jsonLoc["t"],[1]])).tolist()
                    jsonLoc["R"] = np.dot(jsonLoc["R"],Amat[:, 0:3].T).tolist()
                    locGlobalJsonObj["locGlobal"].append(jsonLoc)
                    
                    locGlobalPoints.append(jsonLoc["t"])
            with open(os.path.join(TEST_FOLDER_LOC, output_json_filename),"w") as jsonfile:
                json.dump(locGlobalJsonObj, jsonfile)
            
            # save localization results to ply file
            PlyUtis.addPointToPly(os.path.join(sfm_data_dir,"colorized_global_structure.ply"), locGlobalPoints, 
                                  os.path.join(TEST_FOLDER_LOC,"colorized_global_localize.ply"))
# Example #37
# 0
def main():
    description = 'This script is for calcularing the matrix for converting 3D model to world coordinate and evaluating localization accuracy.' + \
        'Before running this script, please prepare the text file which has image names and 3D coordinate where photos are taken in Ref and Test folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG project is located.')
    parser.add_argument('matches_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG created matches files.')
    parser.add_argument('sfm_data_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where OpenMVG sfm_data.json is located.')
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate localization if this flag is set (default: False)')
    parser.add_argument('--beacon', action='store_true', default=False, \
                        help='Use iBeacon to accelerate localization if this flag is set (default: False)')
    args = parser.parse_args()
    project_dir = args.project_dir
    matches_dir = args.matches_dir
    sfm_data_dir = args.sfm_data_dir
    USE_BOW = args.bow
    USE_BEACON = args.beacon

    BOW_FILE = os.path.join(matches_dir, "BOWfile.yml")
    PCA_FILE = os.path.join(matches_dir, "PCAfile.yml")
    SFM_BEACON_FILE = sfm_data_dir + "/beacon.txt"
    REF_FOLDER = project_dir + "/Ref"
    TEST_FOLDER = project_dir + "/Test"

    if USE_BOW and not os.path.isfile(BOW_FILE):
        print "Use BOW flag is set, but cannot find BOW model file"
        sys.exit()
    if USE_BEACON and not os.path.isfile(SFM_BEACON_FILE):
        print "Use iBeacon flag is set, but cannot find beacon signal file for SfM data"
        sys.exit()

    if not os.path.isfile(os.path.join(REF_FOLDER, "Amat.txt")):

        # 1. find transformation between reconstructed coordinate and world coordinate

        # 1.1 localize reference images
        REF_FOLDER_LOC = os.path.join(REF_FOLDER, "loc")
        if os.path.isdir(REF_FOLDER_LOC):
            shutil.rmtree(REF_FOLDER_LOC)
        os.mkdir(REF_FOLDER_LOC)

        if USE_BOW and not USE_BEACON:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -k=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE)
        elif not USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(REF_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach))
        elif USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(REF_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach) + \
                      " -kb=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE)
        else:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(REF_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + REF_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound))

        # extract centers from all json file and write to a file
        fileLoc = open(os.path.join(REF_FOLDER_LOC, "center.txt"), "w")
        countLocFrame = 0

        for filename in sorted(os.listdir(REF_FOLDER_LOC)):
            if filename[-4:] != "json":
                continue

            countLocFrame = countLocFrame + 1
            with open(os.path.join(REF_FOLDER_LOC, filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(
                    str(loc[0]) + " " + str(loc[1]) + " " + str(loc[2]) +
                    " 255 0 0\n")

        fileLoc.close()

        # read reference data
        mapNameLocRef = FileUtils.loadImageLocationListTxt(
            os.path.join(REF_FOLDER, "refcoor.txt"))

        # read localized json file and find its matching world coordinate
        worldCoor = []
        locCoor = []
        countLoc = 0
        for filename in os.listdir(REF_FOLDER_LOC):
            if filename[-4:] != "json":
                continue

            # read json localization file
            with open(os.path.join(REF_FOLDER_LOC, filename)) as jsonlocfile:
                jsonLoc = json.load(jsonlocfile)

                imgLocName = os.path.basename(jsonLoc["filename"])

                # if file exist in map, add to matrix
                if imgLocName in mapNameLocRef:
                    locCoor.append(jsonLoc["t"])
                    worldCoor.append(mapNameLocRef[imgLocName])
                    countLoc = countLoc + 1

        print "From " + str(len(mapNameLocRef)) + " reference images, " + str(
            countLoc) + " images has been localized."

        if countLoc < 4:
            print "Cannot fix to world coordinate because of less than 4 reference points"
            return

        # find tranformation
        Amat, inliers = mergeSfM.ransacTransform(
            np.array(worldCoor).T,
            np.array(locCoor).T,
            reconstructParam.ransacThresTransformWorldCoordinateRefImage,
            ransacRound=1000)

        if len(inliers) < 4:
            print "Cannot estimate transformation matrix to world coordinate"
            print Amat
            return

        print "Transformation matrix has " + str(len(inliers)) + "inliers"
        print Amat

        with open(os.path.join(REF_FOLDER, "Amat.txt"), "w") as AmatFile:
            np.savetxt(AmatFile, Amat)
        FileUtils.convertNumpyMatTxt2OpenCvMatYml(
            os.path.join(REF_FOLDER, "Amat.txt"),
            os.path.join(REF_FOLDER, "Amat.yml"), "A")
    else:
        with open(os.path.join(REF_FOLDER, "Amat.txt"), "r") as AmatFile:
            Amat = np.loadtxt(AmatFile)

    # convert ply file to world coordinate
    SfmDataUtils.saveGlobalSfM(
        os.path.join(sfm_data_dir, "sfm_data.json"),
        os.path.join(REF_FOLDER, "Amat.txt"),
        os.path.join(sfm_data_dir, "sfm_data_global.json"))
    os.system("openMVG_main_ComputeSfM_DataColor -i " +
              os.path.join(sfm_data_dir, "sfm_data_global.json") + " -o " +
              os.path.join(sfm_data_dir, "colorized_global.ply"))
    PlyUtis.saveCameraPly(
        os.path.join(sfm_data_dir, "sfm_data_global.json"),
        os.path.join(sfm_data_dir, "colorized_global_camera.ply"))
    PlyUtis.saveStructurePly(
        os.path.join(sfm_data_dir, "sfm_data_global.json"),
        os.path.join(sfm_data_dir, "colorized_global_structure.ply"))

    # start localize test
    TEST_FOLDER_LOC = os.path.join(TEST_FOLDER, "loc")
    if not os.path.isfile(os.path.join(TEST_FOLDER_LOC, "center.txt")):

        # localize test images
        if os.path.isdir(TEST_FOLDER_LOC):
            shutil.rmtree(TEST_FOLDER_LOC)
        os.mkdir(TEST_FOLDER_LOC)

        if USE_BOW and not USE_BEACON:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(TEST_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + TEST_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -k=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE)
        elif not USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(TEST_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + TEST_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(TEST_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach))
        elif USE_BOW and USE_BEACON:
            os.system(reconstructIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(TEST_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + TEST_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound) + \
                      " -b=" + SFM_BEACON_FILE + \
                      " -e=" + os.path.join(TEST_FOLDER,"csv") + \
                      " -k=" + str(localizeIBeaconParam.locKNNnum) + \
                      " -c=" + str(localizeIBeaconParam.coocThres) + \
                      " -v=" + str(localizeIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(localizeIBeaconParam.normApproach) + \
                      " -kb=" + str(localizeBOWParam.locKNNnum) + \
                      " -a=" + BOW_FILE + \
                      " -p=" + PCA_FILE)
        else:
            os.system(reconstructParam.LOCALIZE_PROJECT_PATH + \
                      " " + os.path.join(TEST_FOLDER,"inputImg") + \
                      " " + sfm_data_dir + \
                      " " + matches_dir + \
                      " " + TEST_FOLDER_LOC + \
                      " -f=" + str(localizeParam.locFeatDistRatio) + \
                      " -r=" + str(localizeParam.locRansacRound))

        # extract centers from all json file and write to a file
        fileLoc = open(os.path.join(TEST_FOLDER_LOC, "center.txt"), "w")
        countLocFrame = 0

        for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
            if filename[-4:] != "json":
                continue

            countLocFrame = countLocFrame + 1
            with open(os.path.join(TEST_FOLDER_LOC, filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(
                    str(loc[0]) + " " + str(loc[1]) + " " + str(loc[2]) +
                    " 255 0 0\n")

        fileLoc.close()

    # read test data
    mapNameLocTest = FileUtils.loadImageLocationListTxt(
        os.path.join(TEST_FOLDER, "testcoor.txt"))

    # read localized json file and find its matching world coordinate
    worldCoorTest = []
    locCoorTest = []
    countLocTest = 0
    for filename in os.listdir(TEST_FOLDER_LOC):
        if filename[-4:] != "json":
            continue

        # read json localization file
        with open(os.path.join(TEST_FOLDER_LOC, filename)) as jsonlocfile:
            jsonLoc = json.load(jsonlocfile)

            imgLocName = os.path.basename(jsonLoc["filename"])

            # if file exist in map, add to matrix
            if imgLocName in mapNameLocTest:
                locCoorTest.append(jsonLoc["t"])
                worldCoorTest.append(mapNameLocTest[imgLocName])
                countLocTest = countLocTest + 1

    # transform loc coordinate to world coordinate
    print "From " + str(len(mapNameLocTest)) + " test images, " + str(
        countLocTest) + " images has been localized."
    if countLocTest == 0:
        return
    locCoorTest1 = np.hstack(
        (locCoorTest, np.ones((len(locCoorTest), 1), dtype=np.float)))
    locCoorTestWorld = np.dot(Amat, locCoorTest1.T).T

    # calculate error
    normDiff = np.linalg.norm(worldCoorTest - locCoorTestWorld, axis=1)
    meanErr = np.mean(normDiff)
    medianErr = np.median(normDiff)
    print "Mean error = " + str(meanErr) + " meters."
    print "Median error = " + str(medianErr) + " meters."
    binEdge = [0.3 * float(x) for x in range(0, 11)]
    hist = np.histogram(normDiff, bins=binEdge)[0]
    print "Histogram of error: " + str(hist)
    print "Cumulative ratio: " + str(
        np.around(np.cumsum(hist, dtype=float) / countLocTest, 2))
    print "Total loc err larger than " + str(
        np.max(binEdge)) + " meters: " + str(countLocTest - np.sum(hist))

    # convert all localization results to world coordinate and merge to one json file
    locGlobalJsonObj = {}
    locGlobalJsonObj["locGlobal"] = []
    locGlobalPoints = []
    for filename in sorted(os.listdir(TEST_FOLDER_LOC)):
        if filename[-4:] != "json":
            continue
        with open(os.path.join(TEST_FOLDER_LOC, filename)) as jsonfile:
            jsonLoc = json.load(jsonfile)

            imgLocName = os.path.basename(jsonLoc["filename"])

            # if file exist in map
            if imgLocName in mapNameLocTest:
                jsonLoc["t_relative"] = jsonLoc["t"]
                jsonLoc["R_relative"] = jsonLoc["R"]
                jsonLoc["t"] = np.dot(Amat, np.concatenate([jsonLoc["t"],
                                                            [1]])).tolist()
                jsonLoc["R"] = np.dot(jsonLoc["R"], Amat[:, 0:3].T).tolist()
                jsonLoc["groundtruth"] = mapNameLocTest[imgLocName]
                locGlobalJsonObj["locGlobal"].append(jsonLoc)

                locGlobalPoints.append(jsonLoc["t"])
    with open(os.path.join(TEST_FOLDER_LOC, "loc_global.json"),
              "w") as jsonfile:
        json.dump(locGlobalJsonObj, jsonfile)

    # save localization results to ply file
    PlyUtis.addPointToPly(
        os.path.join(sfm_data_dir, "colorized_global_structure.ply"),
        locGlobalPoints,
        os.path.join(TEST_FOLDER_LOC, "colorized_global_localize.ply"))
    def mergeOneModel(self, model1, model2, reconParam):
        """Attempt to merge model2 into model1 and return the merged model.

        Pipeline:
          1. Symlink model2's reconstructed frames into a temp folder.
          2. Localize those frames against model1 (external localize binary).
          3. Estimate a RANSAC 3D transform between the two models and merge
             their sfm_data (mergeSfM.mergeModel).
          4. Validate the merge by checking how many localized frames agree
             with the transformed model; reject and bail out if too few.
          5. Run bundle adjustment and colorize the merged reconstruction.

        Args:
            model1: base sfmModel that model2 is merged into.
            model2: sfmModel whose reconstructed frames are localized on model1.
            reconParam: parameter object with binary paths and merge thresholds.

        Returns:
            (True, merged sfmModel) on success;
            (False, empty sfmModel) if the merge is rejected.

        Side effects: creates/overwrites folders under self.mSfMPath, deletes
        and renames model2's localization folder, shells out via os.system.
        """

        # output folder for this merge attempt, named by merge counter
        sfmOutPath = os.path.join(self.mSfMPath,
                                  "global" + str(self.nMergedModel))

        # create a temporary folder for reconstructed image of model2
        inputImgTmpFolder = os.path.join(self.mSfMPath, "inputImgTmp",
                                         "inputImgTmp" + model2.name)

        # copy reconstructed image from model2 to tmp folder
        # (only frames whose id_view appears in model2.reconFrame)
        sfm_data2 = FileUtils.loadjson(model2.sfm_dataLoc)
        if not os.path.isdir(inputImgTmpFolder):
            listReconFrameName = [
                sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]
                ["filename"] for x in range(0, len(sfm_data2["views"]))
                if sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]
                ["id_view"] in model2.reconFrame
            ]
            FileUtils.makedir(inputImgTmpFolder)
            # "cp -s" creates symlinks rather than copying the image data
            for reconFrameName in listReconFrameName:
                os.system("cp -s " +
                          os.path.join(model2.imgFolLoc, reconFrameName) +
                          " " + inputImgTmpFolder)

        # remove all old localization result
        FileUtils.removedir(model2.locFolLoc)
        FileUtils.makedir(model2.locFolLoc)

        # localize the images from model2 on model1
        # (external binary; flags presumably: -f feature distance ratio,
        # -r RANSAC rounds, -e beacon/csv folder, -i frame skip — TODO confirm
        # against the localize project's CLI)
        os.system(reconParam.LOCALIZE_PROJECT_PATH + \
                  " " + inputImgTmpFolder + \
                  " " + os.path.dirname(model1.sfm_dataLoc) + \
                  " " + self.mMatchesPath + \
                  " " + model2.locFolLoc + \
                  " -f=" + str(reconParam.locFeatDistRatio) + \
                  " -r=" + str(reconParam.locRansacRound) + \
                  " -e=" + model2.csvFolLoc + \
                  " -i=" + str(reconParam.locSkipFrame))

        # remove temporary image folder
        # removedir(inputImgTmpFolder)

        # extract centers from all json file and write to a file
        # (one "x y z 255 0 0" line per localized frame, red point color)
        fileLoc = open(os.path.join(model2.locFolLoc, "center.txt"), "w")
        countLocFrame = 0
        for filename in sorted(os.listdir(model2.locFolLoc)):
            if filename[-4:] != "json":
                continue

            countLocFrame = countLocFrame + 1
            with open(os.path.join(model2.locFolLoc, filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(
                    str(loc[0]) + " " + str(loc[1]) + " " + str(loc[2]) +
                    " 255 0 0\n")
        fileLoc.close()

        # get inlier matches
        FileUtils.makedir(sfmOutPath)
        resultSfMDataFile = os.path.join(sfmOutPath, "sfm_data.json")
        # below also checks if the ratio between first and last svd of M[0:3,0:3]
        # is good or not. If not then reject
        nInlierTmp, M = mergeSfM.mergeModel(
            model1.sfm_dataLoc,
            model2.sfm_dataLoc,
            model2.locFolLoc,
            resultSfMDataFile,
            ransacK=reconParam.ransacStructureThresMul,
            ransacRound=reconParam.ransacRoundMul * len(model1.reconFrame),
            inputImgDir=self.mInputImgPath,
            minLimit=reconParam.min3DnInliers)

        # 3. perform test whether merge is good
        # countFileLoc starts at 1 to avoid division by zero if no merge file
        sfm_merge_generated = True
        countFileAgree = 0
        countFileLoc = 1
        if os.path.isfile(resultSfMDataFile):
            # bundle-adjust the merged model before checking agreement
            os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " +
                      resultSfMDataFile + " " + resultSfMDataFile)
            countFileLoc, countFileAgree = mergeSfM.modelMergeCheckLocal(
                resultSfMDataFile, model2.locFolLoc,
                reconParam.vldMergeAgrFrameThresK)
        else:
            sfm_merge_generated = False

        # fraction of model2's reconstructed frames that agree after merge
        ratioAgreeFrameReconFrame = 0.0
        if (len(model2.reconFrame) > 0):
            ratioAgreeFrameReconFrame = float(countFileAgree) / len(
                model2.reconFrame)
        # fraction of localized frames that agree after merge
        ratioAgreeFrameLocFrame = 0.0
        if (countFileLoc > 0):
            ratioAgreeFrameLocFrame = float(countFileAgree) / countFileLoc

        # write log file (appends one record per merge attempt)
        with open(
                os.path.join(self.mSfMPath, "global" + str(self.nMergedModel),
                             "log.txt"), "a") as filelog:
            filelog.write(("M1: " + model1.name + "\n" + \
                          "M2: " + model2.name + "\n" + \
                          "nInliers: " + str(nInlierTmp) + "\n" + \
                          "countLocFrame: " + str(countLocFrame) + "\n" + \
                          "nReconFrame M2: " + str(len(model2.reconFrame)) + "\n" + \
                          "countFileAgree: " + str(countFileAgree) + "\n" + \
                          "countFileLoc: " + str(countFileLoc) + "\n" + \
                          "not sfm_merge_generated: " + str(not sfm_merge_generated) + "\n" + \
                          # obsolete condition by T. Ishihara 2015.11.10
                          #"nInlierTmp > "+str(reconParam.vldMergeRatioInliersFileagree)+"*countFileAgree: " + str(nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree) + "\n" + \
                          "countFileAgree > "+str(reconParam.vldMergeMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileAgree > "+str(reconParam.vldMergeSmallMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeSmallMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileLoc < countFileAgree*" +str(reconParam.vldMergeShortRatio)+ ": " + str(countFileLoc < countFileAgree*reconParam.vldMergeShortRatio) + "\n" + \
                          "ratioLocAgreeWithReconFrame: " + str(ratioAgreeFrameReconFrame) + "\n" + \
                          "ratioLocAgreeWithReconFrame > " + str(reconParam.vldMergeRatioAgrFReconF) + ": " + str(ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) + "\n" + \
                          "ratioLocAgreeWithLocFrame: " + str(ratioAgreeFrameLocFrame) + "\n" + \
                          "ratioLocAgreeWithLocFrame > " + str(reconParam.vldMergeRatioAgrFLocF) + ": " + str(ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF) + "\n" + \
                          str(M) + "\n\n"))

        # rename the localization folder to save localization result
        if os.path.isdir(model2.locFolLoc + model1.name):
            FileUtils.removedir(model2.locFolLoc + model1.name)
        os.rename(model2.locFolLoc, model2.locFolLoc + model1.name)

        # obsolete merge condition
        '''
        if not sfm_merge_generated or \
            not (nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree and \
            ((countFileAgree > reconParam.vldMergeMinCountFileAgree or (countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and countFileLoc < countFileAgree*reconParam.vldMergeShortRatio)) and \
            ((nInlierTmp > reconParam.vldMergeNInliers and float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconFNInliers) or float(countFileAgree)/countFileLoc > reconParam.vldMergeRatioAgrFLocF) and
            (float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconF))):
        '''
        # update merge condition by T. Ishihara 2015.11.10
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and \
                 countFileLoc < countFileAgree*reconParam.vldMergeShortRatio and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.04.02
        # reject unless enough frames agree in absolute count AND relative to
        # both the reconstructed-frame count and the localized-frame count
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
            print "Transformed locations do not agree with localization. Skip merge between " + model1.name + " and " + model2.name + "."

            # keep the rejected sfm_data for later inspection, tagged with the
            # model pair that produced it
            if os.path.isfile(os.path.join(sfmOutPath, "sfm_data.json")):
                os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                          os.path.join(sfmOutPath,"sfm_data_("+model1.name + "," + model2.name+").json"))

            # move to next video
            return False, sfmModel("", "", "", "", "", "")

        # generate colorized before bundle adjustment for comparison
        os.system("openMVG_main_ComputeSfM_DataColor " + " -i " +
                  os.path.join(sfmOutPath, "sfm_data.json") + " -o " +
                  os.path.join(sfmOutPath, "colorized_pre.ply"))

        # perform bundle adjustment
        # modified by T.Ishihara 2016.04.08
        # fix only translation at first
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rs,rst,rsti" + " -r=" + "1")
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "st,rst,rsti" + " -r=" + "1")

        os.system("openMVG_main_ComputeSfM_DataColor " + " -i " +
                  os.path.join(sfmOutPath, "sfm_data.json") + " -o " +
                  os.path.join(sfmOutPath, "colorized.ply"))

        # merged model name is "A<model1>,<model2>Z" — presumably the A/Z
        # wrappers mark an already-merged model; verify against callers
        return True, sfmModel("A" + model1.name + "," + model2.name + "Z",
                              self.mInputImgPath,
                              self.mCsvPath, self.mMatchesPath,
                              os.path.join(sfmOutPath,
                                           "loc"), resultSfMDataFile)
# Example #39
# 0
def mergeModel(sfm_data_dirA,
               sfm_data_dirB,
               locFolderB,
               outfile,
               ransacK=1.0,
               ransacRound=10000,
               inputImgDir="",
               minLimit=4,
               svdRatio=1.75):
    """Merge sfm_data model B into model A using localization matches.

    Reads the localization results of model B's frames against model A,
    derives consistent 3D-3D point matches between the two models, estimates
    a robust affine transform with RANSAC, and (if the transform is well
    conditioned) merges model B into model A and writes the result.

    Args:
        sfm_data_dirA: path to model A's sfm_data json file.
        sfm_data_dirB: path to model B's sfm_data json file.
        locFolderB: folder with json localization results of B's frames on A.
        ransacK: multiplier for the median-based RANSAC threshold.
        ransacRound: number of RANSAC iterations.
        inputImgDir: if non-empty, replaces "root_path" in the merged json.
        minLimit: minimum number of consistent 3D matches required.
        svdRatio: maximum allowed ratio between the largest and smallest
            singular value of M[0:3, 0:3] (rejects degenerate transforms).

    Returns:
        (nInliers, M): the inlier count and the estimated transform matrix.
        On early failure M is the placeholder list [0]; callers must check
        the inlier count before using M.
    """

    print("Loading sfm_data")
    sfm_dataB = FileUtils.loadjson(sfm_data_dirB)

    # read matching pairs from localization result
    imgnameB, matchlistB = readMatch(locFolderB)

    # get viewID from image name for model B
    viewIDB = imgnameToViewID(imgnameB, sfm_dataB)

    # get mapping between viewID,featID to 3D point ID
    viewFeatMapB = getViewFeatTo3DMap(sfm_dataB)

    # find consistent match between 3D of model B to 3D of model A
    print("Calculating consistent 3D matches")
    match3D_BA = getConsistent3DMatch(viewIDB, matchlistB, viewFeatMapB)
    print("Found " + str(len(match3D_BA)) + " consistent matches")

    # not enough matches (at least 4 are needed for the 3D transform,
    # regardless of the caller-supplied minLimit)
    if len(match3D_BA) <= max(4, minLimit):
        return len(match3D_BA), [0]

    # move the load of larger model here to reduce time if merging is not possible
    sfm_dataA = FileUtils.loadjson(sfm_data_dirA)

    # get 3D point. Note that element 0 of each pair in match3D_BA
    # is 3D pt ID of model B and element 1 is that of model A
    print("Load 3D points")
    pointA = get3DPointloc(sfm_dataA, [x[1] for x in match3D_BA])
    pointB = get3DPointloc(sfm_dataB, [x[0] for x in match3D_BA])

    # np.float64 instead of the np.float alias (removed in NumPy 1.24)
    pointAn = np.asarray(pointA, dtype=np.float64).T
    pointBn = np.asarray(pointB, dtype=np.float64).T

    # calculate ransac threshold
    # calculate as 4 times the median of distance between
    # 3D pt of A
    print("Find transformation with RANSAC")
    # modified by T.Ishihara 2016.04.08
    # median of camera positions merge too many points, use median of structure points instead
    #ransacThres = findMedianThres(sfm_dataA, ransacK)
    ransacThres = findMedianStructurePointsThres(sfm_dataA, ransacK)

    # TODO : replace with RANSAC similarity transform
    # find robust transformation
    M, inliers = ransacAffineTransform(pointAn, pointBn, ransacThres,
                                       ransacRound, svdRatio)
    # cannot find RANSAC transformation
    if (len(inliers) == 0):
        return len(match3D_BA), [0]
    print(M)

    # stop if not enough inliers or the transform is badly conditioned
    # (large singular-value spread indicates a degenerate/skewed fit)
    sSvd = np.linalg.svd(M[0:3, 0:3], compute_uv=False)
    if len(inliers) <= 4 or sSvd[0] / sSvd[-1] > svdRatio:
        return len(inliers), M

    # perform merge
    # last argument is map from inliers 3D pt Id of model B to that of model A
    print("Merging sfm_data")
    merge_sfm_data(sfm_dataA, sfm_dataB, M,
                   {match3D_BA[x][0]: match3D_BA[x][1]
                    for x in inliers})

    # change input image folder
    if inputImgDir != "":
        sfm_dataA["root_path"] = inputImgDir

    # save json file
    print("Saving json file")
    FileUtils.savejson(sfm_dataA, outfile)

    # return number of inliers for transformation
    return len(inliers), M
# Example #40
# 0
def main():
    # Smoke test: load an OpenCV matrix saved in binary form and echo it.
    # NOTE(review): relative path "../data" assumes a specific working
    # directory — confirm before running from elsewhere.
    mat = FileUtils.loadBinMat(os.path.join("../data", "opencv-mat.bin"))
    print "loaded binary matrix : " + str(mat)