def main():
    """Export a trained beacon posenet Keras model to a frozen graph and checkpoint.

    Command-line arguments select the beacon setting file, the directory
    holding the trained Keras weights, and the output locations for the
    serialized graph def (.pb) and the TensorFlow checkpoint.
    """
    description = 'This script is for testing posenet'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None,
                        default=None, type=str, choices=None, metavar=None,
                        help='File path where beacon setting file is saved.')
    parser.add_argument('input_model_dir', action='store', nargs=None, const=None,
                        default=None, type=str, choices=None, metavar=None,
                        help='Directory path where input model is saved.')
    parser.add_argument('output_graph_file', action='store', nargs=None, const=None,
                        default=None, type=str, choices=None, metavar=None,
                        help='File path where exported graph def protobuf (.pb) file will be saved.')
    parser.add_argument('output_model_dir', action='store', nargs=None, const=None,
                        default=None, type=str, choices=None, metavar=None,
                        help='Directory path where output model will be saved.')
    parser.add_argument('-s', '--use_shrink_model', action='store_true', default=False,
                        help='Use shrink model (default: False)')
    parser.add_argument('-l', '--lstm_model', action='store_true', default=False,
                        help='Export LSTM model (default: False)')
    args = parser.parse_args()
    input_beacon_setting_file = args.input_beacon_setting_file
    input_model_dir = args.input_model_dir
    output_graph_file = args.output_graph_file
    output_model_dir = args.output_model_dir
    output_model_file = os.path.join(output_model_dir, "model.ckpt")
    use_shrink_model = args.use_shrink_model
    lstm_model = args.lstm_model
    # parenthesized single-argument print is valid on both Python 2 and 3
    print("use shrink model for training : " + str(use_shrink_model))

    # parse beacon setting file; the number of beacons fixes the net input size
    beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
    beacon_num = len(beaconmap.keys())

    # convert hd5 file to ckpt
    # https://github.com/keras-team/keras/issues/9040
    K.set_learning_phase(0)
    if use_shrink_model:
        if lstm_model:
            model = posenet_beacon_no_inception_shrink_lstm_keras.create_posenet(beacon_num, trainable=False)
        else:
            model = posenet_beacon_no_inception_shrink_keras.create_posenet(beacon_num, trainable=False)
    else:
        print("Do not shrink model is not supported")
        sys.exit()
    model.load_weights(os.path.join(input_model_dir, 'trained_weights.h5'))
    model.summary()

    # Save graph def protobuf and TensorFlow checkpoint
    session = keras.backend.get_session()
    graph = session.graph
    graph_def = graph.as_graph_def()
    with gfile.GFile(output_graph_file, 'wb') as f:
        f.write(graph_def.SerializeToString())

    saver = tf.train.Saver()
    saver.save(session, output_model_file)
def parse_beacon_string(beaconstr, beaconmap):
    """Parse one CSV beacon-signal line into a float32 array shaped (n, 1, 1).

    beaconstr : comma-separated record; field 0 is a timestamp, field 1 must be
        the literal "Beacon", fields 6+ are the beacon RSSI list.
    beaconmap : beacon-id map forwarded to IBeaconUtils.parseBeaconList.

    Exits the process if the record is not a beacon record.
    """
    splitline = beaconstr.split(",")

    # first field is an integer timestamp (parsed but not returned)
    timestamp = int(splitline[0])

    # parse by type: only "Beacon" records are accepted
    if splitline[1] != "Beacon":
        print("invalid beacon signal")
        sys.exit()

    rssi = IBeaconUtils.parseBeaconList(splitline[6:], beaconmap)
    beacon = rssi.astype(np.float32).reshape(len(rssi), 1, 1)
    return beacon
    def updateCooccurenceGraph(self,
                               beaconCoocMatRat,
                               beaconCoocMatFrame,
                               listToUpdate,
                               coocThres=0.75):
        """Refresh beacon co-occurrence matrix entries for the given model indexes.

        Parameters
        ----------
        beaconCoocMatRat : symmetric matrix of co-occurrence ratios; mutated in
            place and returned.
        beaconCoocMatFrame : symmetric matrix of co-occurring frame counts;
            mutated in place and returned.
        listToUpdate : model indexes whose rows/columns need recomputing.
        coocThres : minimum beacon signal co-occurrence forwarded to
            IBeaconUtils.findNumImgByBeaconIntersect.
        """
        print("Update beacon cooccurence between videos")
        nModel = len(self.sfmModel)

        # unordered pairs already processed; a set of frozensets gives O(1)
        # membership instead of scanning a list for both [i, j] and [j, i]
        donePairs = set()
        for i in listToUpdate:
            for j in range(0, nModel):

                pair = frozenset((i, j))
                if pair in donePairs:
                    continue
                donePairs.add(pair)

                # find smaller and larger model
                if len(self.sfmModel[i].reconFrame) < len(
                        self.sfmModel[j].reconFrame):
                    smallerModel = self.sfmModel[i]
                    largerModel = self.sfmModel[j]
                else:
                    smallerModel = self.sfmModel[j]
                    largerModel = self.sfmModel[i]

                # calculate beacon cooccurence
                beaconCooc = IBeaconUtils.findNumImgByBeaconIntersect(
                    smallerModel.beaconFileLoc,
                    largerModel.beaconFileLoc,
                    coocThres,  # minimum beacon signal cooccurence
                    valQ=smallerModel.reconFrame)

                # save value frame
                beaconCoocMatFrame[i, j] = beaconCooc
                beaconCoocMatFrame[j, i] = beaconCooc

                # convert raw count to a ratio of the smaller model's frames
                beaconCooc = (beaconCooc + 0.0) / len(smallerModel.reconFrame)

                # save values ratio
                beaconCoocMatRat[i, j] = beaconCooc
                beaconCoocMatRat[j, i] = beaconCooc

        print("Update calculating beacon cooccurrence")
        return beaconCoocMatRat, beaconCoocMatFrame
 def updateCooccurenceGraph(self,beaconCoocMatRat,beaconCoocMatFrame,listToUpdate,coocThres=0.75):
     """Refresh beacon co-occurrence matrix entries for the given model indexes.

     beaconCoocMatRat / beaconCoocMatFrame are mutated in place (symmetric
     writes at [i,j] and [j,i]) and returned; listToUpdate holds the model
     indexes whose rows/columns must be recomputed; coocThres is the minimum
     beacon signal co-occurrence passed to IBeaconUtils.
     """
     print "Update beacon cooccurence between videos"
     nModel = len(self.sfmModel)
     
     # pairs already processed; both orderings [i,j]/[j,i] are checked below
     listDone = []
     for i in listToUpdate:
         for j in range(0,nModel):
             
             if [i,j] in listDone or [j,i] in listDone:
                 continue
             else:
                 listDone.append([i,j])
             
             # find smaller and larger model
             if len(self.sfmModel[i].reconFrame) < len(self.sfmModel[j].reconFrame):
                 smallerModel = self.sfmModel[i]
                 largerModel = self.sfmModel[j]
             else: 
                 smallerModel = self.sfmModel[j]
                 largerModel = self.sfmModel[i]
             
             # calculate beacon cooccurence
             beaconCooc = IBeaconUtils.findNumImgByBeaconIntersect(
                 smallerModel.beaconFileLoc,
                 largerModel.beaconFileLoc,
                 coocThres, # minimum beacon signal cooccurence
                 valQ=smallerModel.reconFrame)
             
             # save value frame
             beaconCoocMatFrame[i,j] = beaconCooc
             beaconCoocMatFrame[j,i] = beaconCooc
             
             # convert raw count to a ratio of the smaller model's frames
             beaconCooc = (beaconCooc+0.0)/len(smallerModel.reconFrame)
             
             # save values ratio
             beaconCoocMatRat[i,j] = beaconCooc
             beaconCoocMatRat[j,i] = beaconCooc
     
     print "Update calculating beacon cooccurrence"
     return beaconCoocMatRat,beaconCoocMatFrame
    def calCooccurenceGraph(self, coocThres=0.5):
        """Compute pairwise beacon co-occurrence between all SfM video models.

        coocThres : minimum beacon signal co-occurrence forwarded to
            IBeaconUtils.findNumImgByBeaconIntersect.

        Returns (beaconCoocMatRat, beaconCoocMatFrame): symmetric matrices of
        the co-occurrence ratio (float32) and the co-occurring frame count
        (int16) for every pair of models in self.sfmModel.
        """
        print("Calculating beacon cooccurence between videos")
        nModel = len(self.sfmModel)
        beaconCoocMatRat = np.zeros((nModel, nModel), dtype=np.float32)
        beaconCoocMatFrame = np.zeros((nModel, nModel), dtype=np.int16)

        # upper-triangular iteration visits each unordered pair exactly once
        for i in range(0, nModel - 1):
            for j in range(i + 1, nModel):

                # find smaller and larger model
                if len(self.sfmModel[i].reconFrame) < len(
                        self.sfmModel[j].reconFrame):
                    smallerModel = self.sfmModel[i]
                    largerModel = self.sfmModel[j]
                else:
                    smallerModel = self.sfmModel[j]
                    largerModel = self.sfmModel[i]

                # calculate beacon cooccurence
                beaconCooc = IBeaconUtils.findNumImgByBeaconIntersect(
                    smallerModel.beaconFileLoc,
                    largerModel.beaconFileLoc,
                    coocThres,  # minimum beacon signal cooccurence
                    valQ=smallerModel.reconFrame)

                # save value frame
                beaconCoocMatFrame[i, j] = beaconCooc
                beaconCoocMatFrame[j, i] = beaconCooc

                # convert raw count to a ratio of the smaller model's frames
                beaconCooc = (beaconCooc + 0.0) / len(smallerModel.reconFrame)

                # save values ratio
                beaconCoocMatRat[i, j] = beaconCooc
                beaconCoocMatRat[j, i] = beaconCooc

        print("Complete calculating beacon cooccurrence")
        return beaconCoocMatRat, beaconCoocMatFrame
 def calCooccurenceGraph(self,coocThres=0.5):
     """Compute pairwise beacon co-occurrence between all SfM video models.

     Returns (beaconCoocMatRat, beaconCoocMatFrame): symmetric matrices of
     the co-occurrence ratio (float32) and co-occurring frame count (int16)
     for every pair of models in self.sfmModel.
     """
     print "Calculating beacon cooccurence between videos"
     nModel = len(self.sfmModel)
     beaconCoocMatRat = np.zeros((nModel,nModel),dtype=np.float32)
     beaconCoocMatFrame = np.zeros((nModel,nModel),dtype=np.int16)
     
     # upper-triangular iteration visits each unordered pair exactly once
     for i in range(0,nModel-1):
         for j in range(i+1,nModel):
             
             # find smaller and larger model
             if len(self.sfmModel[i].reconFrame) < len(self.sfmModel[j].reconFrame):
                 smallerModel = self.sfmModel[i]
                 largerModel = self.sfmModel[j]
             else: 
                 smallerModel = self.sfmModel[j]
                 largerModel = self.sfmModel[i]
             
             # calculate beacon cooccurence
             beaconCooc = IBeaconUtils.findNumImgByBeaconIntersect(
                 smallerModel.beaconFileLoc,
                 largerModel.beaconFileLoc,
                 coocThres, # minimum beacon signal cooccurence
                 valQ=smallerModel.reconFrame)
             
             # save value frame
             beaconCoocMatFrame[i,j] = beaconCooc
             beaconCoocMatFrame[j,i] = beaconCooc
             
             # convert raw count to a ratio of the smaller model's frames
             beaconCooc = (beaconCooc+0.0)/len(smallerModel.reconFrame)
             
             # save values ratio
             beaconCoocMatRat[i,j] = beaconCooc
             beaconCoocMatRat[j,i] = beaconCooc
     
     print "Complete calculating beacon cooccurrence"
     return beaconCoocMatRat,beaconCoocMatFrame
# Esempio n. 7 ("Example no. 7") — scrape-page artifact; converted to a comment so the file parses
    def mergeModel(self, listbeacon, image_descFile, inputPath, outputPath, reconParam=ReconstructParam, 
                   reconIBeaconParam=ReconstructIBeaconParam, reconBOWParam=ReconstructBOWParam, 
                   mergeCoocThresRat=0.25, mergeCoocThresFrame=30):
        """Iteratively merge SfM video models linked by beacon co-occurrence.

        Regenerates beacon.txt for every model, links the shared input/feature
        files into the merged project folders, then repeatedly picks a base
        model from the connected components of the co-occurrence graph and
        tries to merge its neighbours into it (smallest first) until no
        mergable component remains.

        listbeacon / image_descFile : files copied into the merged project.
        inputPath / outputPath : per-video project roots to link files from.
        mergeCoocThresRat / mergeCoocThresFrame : thresholds on co-occurrence
            ratio and frame count for creating a graph edge (either suffices).
        """
        print "Begin merging models"
        
        normBeaconApproach = reconIBeaconParam.normApproach
        
        FileUtils.makedir(self.mInputImgPath)
        FileUtils.makedir(self.mCsvPath)
        FileUtils.makedir(self.mMatchesPath)
        FileUtils.makedir(self.mSfMPath)
        
        # generate beacon.txt file for all models
        # we need generate every time to synchronize with sfm_data.json
        for video in self.sfmModel:
            print "Generating beacon.txt for " + video.name
            # write beacon file
            IBeaconUtils.exportBeaconDataForSfmImageFrames(video.csvFolLoc, video.sfm_dataLoc, 
                                                           listbeacon, video.beaconFileLoc, normBeaconApproach)
            
            if (not os.path.isfile(video.beaconFileLoc)):
                print("Error: beacon file %s is not created" % video.beaconFileLoc)
                return
        
        # create symbolic links to all images, csv, and descriptor/feature files
        os.system("cp --remove-destination -s " + os.path.join(inputPath,"*","inputImg","*") + " " + self.mInputImgPath)
        os.system("cp --remove-destination -s " + os.path.join(inputPath,"*","csv","*") + " " + self.mCsvPath)
        os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.desc") + " " + self.mMatchesPath)
        os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.feat") + " " + self.mMatchesPath)
        os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.bow") + " " + self.mMatchesPath)
        
        # copy listbeacon.txt and image_describer.txt
        os.system("cp --remove-destination " + listbeacon + " " + self.mInputPath)
        os.system("cp --remove-destination " + image_descFile + " " + self.mMatchesPath)
         
        listLead = range(0,len(self.sfmModel)) # list of model indexes which can initiate merge (list of model indexes which did not fail merge yet)
        listBye = [] # list of model indexes which will not be used to initiate merge (list of model indexes which already failed merge)
        baseVideo = -1
        mergeCandidatesRemainsForBaseVideo = True
        calBeaconSim = False
        
        # main merge loop: runs until no connected component can be merged
        while True:
            # update model indexes which are not used to initiate merge
            if not mergeCandidatesRemainsForBaseVideo:
                listBye.append(self.sfmModel[baseVideo].name)
            
            listName = [(x,self.sfmModel[x].name) for x in range(0,len(self.sfmModel))]
            listLead = [x[0] for x in listName if x[1] not in listBye]
            
            # if there was a merge, recalculate the cooccurence graph
            if mergeCandidatesRemainsForBaseVideo:
                # calculate cooccurence graph (only once; updated incrementally after merges)
                if not calBeaconSim:
                    beaconCoocRat, beaconCoocFrame = self.calCooccurenceGraph(coocThres=reconIBeaconParam.coocThres)
                    calBeaconSim = True
                    
                print "graph edges : " + str(beaconCoocRat)
                print "SfM model names : " + str([x.name for x in self.sfmModel])
                # an edge exists when either the ratio or the frame count passes its threshold
                connectionGraph = np.logical_or(beaconCoocRat > mergeCoocThresRat,beaconCoocFrame > mergeCoocThresFrame)
                
                # calculate connected component on graph
                ccLabel = scipy.sparse.csgraph.connected_components(
                    connectionGraph,
                    directed=False)[1]
                        
            # if no more mergable components (every node is its own component)
            if len(np.unique(ccLabel)) == len(ccLabel):
                print "No more mergable components. Exiting."
                return
            
            # sort the length of reconstructed frames in each video 
            # from small to large to find the base Video
            reconFrameLenList = [len(self.sfmModel[i].reconFrame) for i in range(0,len(self.sfmModel))]
            reconFrameLenIdx = [x[0] for x in sorted(enumerate(reconFrameLenList), key=lambda y:y[1])]

            # find first base video that has a connected component
            baseVideo = ""
            for video in reconFrameLenIdx:
                if np.sum(ccLabel==ccLabel[video]) > 1 and video in listLead:
                    baseVideo = video
                    break
                
            # this should never be called since program should exit 
            # if there is no connected components in graph 
            if baseVideo == "":
                print "Cannot find connected component to merge. Exiting."
                return

            # get videos that connect to this baseVideo
            # and sort the from smallest to largest as merge order
            neighborVec = np.where(connectionGraph[baseVideo,:])[0]
            neighborVec = neighborVec[neighborVec!=baseVideo] # prevent selecting itself to merge
            mergeCandidate = neighborVec.tolist()
            nReconFrameMergeCand = [len(self.sfmModel[x].reconFrame) for x in mergeCandidate]
            orderMergeCand = [x[0] for x in sorted(enumerate(nReconFrameMergeCand), key=lambda y:y[1])]
            mergeCandidateModel = [self.sfmModel[mergeCandidate[i]] for i in orderMergeCand]

            mergedModel = self.sfmModel[baseVideo]
            
            print "Based model: " + mergedModel.name
            print "To merge with: " + str([x.name for x in mergeCandidateModel])
            mergeCandidatesRemainsForBaseVideo = False
            for video in mergeCandidateModel:
                
                # check if failed localization has been performed on this pair before
                # if so, skip this localization
                if self.isBadMatch(video,mergedModel):
                    continue
                
                # swap order so small model is merged to larger model
                swap = False
                if len(mergedModel.reconFrame) < len(video.reconFrame):
                    tmp = mergedModel
                    mergedModel = video
                    video = tmp
                    swap = True
                
                # attempt merge
                mergeResult, mergedModelTmp = self.mergeOneModel(mergedModel,video,reconParam,reconIBeaconParam,reconBOWParam)
                
                if mergeResult:
                    mergedModel.update(mergedModelTmp)
                    videoIdx = self.sfmModel.index(video)
                    del self.sfmModel[videoIdx]
                    
                    # update beacon: drop the merged model's row/column, then
                    # recompute co-occurrence for the surviving merged model
                    beaconCoocRat = np.delete(beaconCoocRat,videoIdx,0)
                    beaconCoocRat = np.delete(beaconCoocRat,videoIdx,1)
                    beaconCoocFrame = np.delete(beaconCoocFrame,videoIdx,0)
                    beaconCoocFrame = np.delete(beaconCoocFrame,videoIdx,1)
                    beaconCoocRat, beaconCoocFrame = self.updateCooccurenceGraph(beaconCoocRat, beaconCoocFrame, [self.sfmModel.index(mergedModel)], coocThres=reconIBeaconParam.coocThres)
                    
                    self.nMergedModel = self.nMergedModel+1
                    self.save(os.path.join(self.mSfMPath,"global" + str(self.nMergedModel-1),"mergeGraph.txt"))
                    self.save(os.path.join(self.mSfMPath,"mergeGraph.txt"))
                    mergeCandidatesRemainsForBaseVideo = True
                    
                    # reset listBye to allow small model to merge to new large model
                    listBye = []
                    
                    # write result log file
                    with open(os.path.join(self.mSfMPath,"logRecon.txt"),"a") as outLogFile:
                        outLogFile.write(str(self.nMergedModel-1) + " " + mergedModel.name + "\n")
                    
                    # start again
                    break
                else:
                    # add to bad matches
                    self.badMatches.append([video.name,mergedModel.name])
                
                    # save
                    self.save(os.path.join(self.mSfMPath,"mergeGraph.txt"))
                
                    if swap:
                        # swap back if not merged
                        mergedModel = video
# Esempio n. 8 ("Example no. 8") — scrape-page artifact; converted to a comment so the file parses
    def mergeOneModel(self, model1, model2, reconParam, reconIBeaconParam, reconBOWParam):
        """Attempt to merge model2 into model1 via localization.

        Localizes model2's reconstructed frames against model1 (with or
        without BOW depending on self.useBow), runs mergeSfM.mergeModel on the
        localization output, bundle-adjusts the combined model, and validates
        the merge against agreement-ratio/inlier thresholds.

        Returns (True, merged sfmModelIBeacon) on success, or
        (False, empty sfmModelIBeacon) when the merge is rejected.
        """
        sfmOutPath = os.path.join(self.mSfMPath,"global"+str(self.nMergedModel))
        
        # modified by T. IShihara 2016.06.14
        # fix file name too long issue
        # 
        # create a temporary folder for reconstructed image of model2
        #inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmp"+model2.name)        
        inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmpModel2")
        if os.path.isdir(inputImgTmpFolder):
            FileUtils.removedir(inputImgTmpFolder)
        
        # copy reconstructed image from model2 to tmp folder
        sfm_data2 = FileUtils.loadjson(model2.sfm_dataLoc)
        if not os.path.isdir(inputImgTmpFolder):
            listReconFrameName = [sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["filename"] for x in range(0,len(sfm_data2["views"])) if sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["id_view"] in model2.reconFrame]
            FileUtils.makedir(inputImgTmpFolder)
            for reconFrameName in listReconFrameName:
                os.system("cp -s " + os.path.join(model2.imgFolLoc,reconFrameName) + " " + inputImgTmpFolder)
        
        
        # remove all old localization result
        FileUtils.removedir(model2.locFolLoc) 
        FileUtils.makedir(model2.locFolLoc)

        # localize the images from model2 on model1
        if self.useBow:
            os.system(reconIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + inputImgTmpFolder + \
                      " " + os.path.dirname(model1.sfm_dataLoc) + \
                      " " + self.mMatchesPath + \
                      " " + model2.locFolLoc + \
                      " -f=" + str(reconParam.locFeatDistRatio) + \
                      " -r=" + str(reconParam.locRansacRound) + \
                      " -b=" + model1.beaconFileLoc + \
                      " -e=" + model2.csvFolLoc + \
                      " -k=" + str(reconIBeaconParam.locKNNnum) + \
                      " -c=" + str(reconIBeaconParam.coocThres) + \
                      " -i=" + str(reconParam.locSkipFrame) + \
                      " -v=" + str(reconIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(reconIBeaconParam.normApproach) + \
                      " -kb=" + str(reconBOWParam.locKNNnum) + \
                      " -a=" + os.path.join(self.mMatchesPath, "BOWfile.yml") + \
                      " -p=" + os.path.join(self.mMatchesPath, "PCAfile.yml"))
        else:
            os.system(reconIBeaconParam.LOCALIZE_PROJECT_PATH + \
                      " " + inputImgTmpFolder + \
                      " " + os.path.dirname(model1.sfm_dataLoc) + \
                      " " + self.mMatchesPath + \
                      " " + model2.locFolLoc + \
                      " -f=" + str(reconParam.locFeatDistRatio) + \
                      " -r=" + str(reconParam.locRansacRound) + \
                      " -b=" + model1.beaconFileLoc + \
                      " -e=" + model2.csvFolLoc + \
                      " -k=" + str(reconIBeaconParam.locKNNnum) + \
                      " -c=" + str(reconIBeaconParam.coocThres) + \
                      " -i=" + str(reconParam.locSkipFrame) + \
                      " -v=" + str(reconIBeaconParam.locSkipSelKNN) + \
                      " -n=" + str(reconIBeaconParam.normApproach))
                  
        # remove temporary image folder
        # removedir(inputImgTmpFolder)
        
        # extract centers from all json file and write to a file
        fileLoc = open(os.path.join(model2.locFolLoc,"center.txt"),"w")
        countLocFrame = 0
        for filename in sorted(os.listdir(model2.locFolLoc)):
            if filename[-4:]!="json":
                continue
            
            countLocFrame = countLocFrame + 1
            with open(os.path.join(model2.locFolLoc,filename)) as locJson:
                #print os.path.join(sfm_locOut,filename)
                locJsonDict = json.load(locJson)
                loc = locJsonDict["t"]
                fileLoc.write(str(loc[0]) + " "  + str(loc[1]) + " "  +str(loc[2]) + " 255 0 0\n" )   
        fileLoc.close() 
        
        # get inlier matches
        FileUtils.makedir(sfmOutPath)
        resultSfMDataFile = os.path.join(sfmOutPath,"sfm_data.json")
        # below also checks if the ratio between first and last svd of M[0:3,0:3] 
        # is good or not. If not then reject
        # TODO : revisit ransacRound parameter, use number of reconstruction frame to determine structure points transform seems small
        nMatchPointsTmp, nInlierTmp, M = mergeSfM.mergeModel(model1.sfm_dataLoc,
                            model2.sfm_dataLoc,
                            model2.locFolLoc,
                            resultSfMDataFile,
                            ransacThres=model1.ransacStructureThres,
                            mergePointThres=model1.mergeStructureThres,
                            ransacRoundMul=reconParam.ransacRoundMul,
                            inputImgDir=self.mInputImgPath,
                            minLimit=reconParam.min3DnInliers)
        
        # fraction of matched structure points that survived as inliers
        ratioInlierMatchPoints = 0.0
        if nMatchPointsTmp>0:
            ratioInlierMatchPoints = float(nInlierTmp)/nMatchPointsTmp
        
        # 3. perform test whether merge is good
        sfm_merge_generated = True
        countFileAgree = 0
        countFileLoc = 1
        if os.path.isfile(resultSfMDataFile):
            os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
            countFileLoc, countFileAgree = mergeSfM.modelMergeCheckLocal(resultSfMDataFile, model2.locFolLoc, model1.validMergeRansacThres)
        else:
            sfm_merge_generated = False
        
        # agreement ratios used by the acceptance test below
        ratioAgreeFrameReconFrame = 0.0
        if (len(model2.reconFrame)>0):
            ratioAgreeFrameReconFrame = float(countFileAgree)/len(model2.reconFrame)
        ratioAgreeFrameLocFrame = 0.0
        if (countFileLoc>0):
            ratioAgreeFrameLocFrame = float(countFileAgree)/countFileLoc
        
        # write log file
        with open(os.path.join(self.mSfMPath,"global"+str(self.nMergedModel),"log.txt"),"a") as filelog:
            filelog.write(("M1: " + model1.name + "\n" + \
                          "M2: " + model2.name + "\n" + \
                          "nMatchedPoints: " + str(nMatchPointsTmp) + "\n" + \
                          "nInliers: " + str(nInlierTmp) + "\n" + \
                          "ratioInlierWithMatchedPoints: " + str(ratioInlierMatchPoints) + "\n" + \
                          "countLocFrame: " + str(countLocFrame) + "\n" + \
                          "nReconFrame M2: " + str(len(model2.reconFrame)) + "\n" + \
                          "countFileAgree: " + str(countFileAgree) + "\n" + \
                          "countFileLoc: " + str(countFileLoc) + "\n" + \
                          "not sfm_merge_generated: " + str(not sfm_merge_generated) + "\n" + \
                          # obsolete condition by T. Ishihara 2015.11.10
                          #"nInlierTmp > "+str(reconParam.vldMergeRatioInliersFileagree)+"*countFileAgree: " + str(nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree) + "\n" + \
                          "countFileAgree > "+str(reconParam.vldMergeMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileAgree > "+str(reconParam.vldMergeSmallMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeSmallMinCountFileAgree) + "\n" + \
                          # obsolete condition by T. Ishihara 2016.04.02
                          #"countFileLoc < countFileAgree*" +str(reconParam.vldMergeShortRatio)+ ": " + str(countFileLoc < countFileAgree*reconParam.vldMergeShortRatio) + "\n" + \
                          "ratioLocAgreeWithReconFrame: " + str(ratioAgreeFrameReconFrame) + "\n" + \
                          "ratioLocAgreeWithReconFrame > " + str(reconParam.vldMergeRatioAgrFReconF) + ": " + str(ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) + "\n" + \
                          "ratioLocAgreeWithLocFrame: " + str(ratioAgreeFrameLocFrame) + "\n" + \
                          "ratioLocAgreeWithLocFrame > " + str(reconParam.vldMergeRatioAgrFLocF) + ": " + str(ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF) + "\n" + \
                          str(M) + "\n\n"))
       
        # rename the localization folder to save localization result
        '''
        if os.path.isdir(model2.locFolLoc+model1.name):
            FileUtils.removedir(model2.locFolLoc+model1.name)
        os.rename(model2.locFolLoc,model2.locFolLoc+model1.name)
        '''
        
        # obsolete merge condition
        '''
        if not sfm_merge_generated or \
            not (nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree and \
            ((countFileAgree > reconParam.vldMergeMinCountFileAgree or (countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and countFileLoc < countFileAgree*reconParam.vldMergeShortRatio)) and \
            ((nInlierTmp > reconParam.vldMergeNInliers and float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconFNInliers) or float(countFileAgree)/countFileLoc > reconParam.vldMergeRatioAgrFLocF) and
            (float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconF))):
        '''
        # update merge condition by T. Ishihara 2015.11.10
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and \
                 countFileLoc < countFileAgree*reconParam.vldMergeShortRatio and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.04.02
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
                    ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
        '''
        # update merge condition by T. Ishihara 2016.06.09
        '''
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
                 nInlierTmp > reconParam.min3DnInliers and \
                 ratioInlierMatchPoints > reconParam.vldMergeRatioInliersMatchPoints):
        '''
        # update merge condition by T. Ishihara 2016.06.20
        if not sfm_merge_generated or \
            not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
                 ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
                 nInlierTmp > reconParam.min3DnInliers):
            print "Transformed locations do not agree with localization. Skip merge between " + model1.name + " and " + model2.name + "."
            
            '''
            if os.path.isfile(os.path.join(sfmOutPath,"sfm_data.json")):
                os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                          os.path.join(sfmOutPath,"sfm_data_("+model1.name + "," + model2.name+").json"))
            '''
                            
            # move to next video: return an empty placeholder model on rejection
            return False, sfmModelIBeacon("","","","","","","",validMergeRansacThres=0,validMergeRansacThresK=0,
                                          ransacStructureThres=0, ransacStructureThresK=0, 
                                          mergeStructureThres=0, mergeStructureThresK=0)
                
        # generate colorized before bundle adjustment for comparison
        os.system("openMVG_main_ComputeSfM_DataColor " +
            " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
            " -o " + os.path.join(sfmOutPath,"colorized_pre.ply"))        
        
        # TODO : try computing structure from know pose here
        # https://github.com/openMVG/openMVG/issues/246
        # http://openmvg.readthedocs.io/en/latest/software/SfM/ComputeStructureFromKnownPoses/
        
        # TODO : revisit the order of bundle adjustment
        # perform bundle adjustment
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rs,rst,rsti" + " -r=" + "1")
        '''
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
                  " -c=" + "rst,rsti" + " -r=" + "1")
        
        os.system("openMVG_main_ComputeSfM_DataColor " +
            " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
            " -o " + os.path.join(sfmOutPath,"colorized.ply"))
        
        # write new beacon file
        IBeaconUtils.exportBeaconDataForSfmImageFrames(self.mCsvPath, resultSfMDataFile, os.path.join(self.mInputPath,"listbeacon.txt"),
                                                       os.path.join(sfmOutPath,"beacon.txt"), reconIBeaconParam.normApproach)
        
        return True, sfmModelIBeacon("A" + model1.name + "," + model2.name +"Z", self.mInputImgPath, self.mCsvPath, 
                                     os.path.join(sfmOutPath,"beacon.txt"), self.mMatchesPath, os.path.join(sfmOutPath,"loc"), 
                                     resultSfMDataFile, validMergeRansacThres=model1.validMergeRansacThres,
                                     ransacStructureThres=model1.ransacStructureThres, 
                                     mergeStructureThres=model1.mergeStructureThres)
def main():
    """Merge multiple SfM output models into one SfM model.

    Reads an input CSV where each row describes one OpenMVG project to
    merge: project directory, path to its sfm_data.json, and path to a
    matrix "A" (Numpy text format) that transforms the model to global
    coordinates.  All models are merged into the first model's frame,
    transformed back to the first model's local coordinates, and the
    merged sfm_data.json, optional beacon data, Amat files, and BoW/PCA
    training outputs are written under the output directory.
    """
    description = 'This script is for merging multiple SfM output models to one SfM model.' + \
                'Please prepare multiple OpenMVG projects which have output SfM models, and matrix to convert to global coordinate.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which lists OpenMVG projects which will be merged.')
    parser.add_argument('output_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Output directory path where merged model will be saved.')
    args = parser.parse_args()
    input_csv = args.input_csv
    output_dir = args.output_dir

    # load reconstruct parameters
    reconstructParam = ReconstructParam.ReconstructParam

    # read projects list: each row is (project dir, sfm_data path, A matrix path)
    projectList = []
    with open(input_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            project = {}
            project["dir"] = row[0]
            project["sfm_data"] = row[1]
            project["A"] = row[2]
            projectList.append(project)

    # copy source files to output directory
    for project in projectList:
        copyOriginalFiles(project["dir"], output_dir)

    # prepare output directory
    if not os.path.isdir(os.path.join(output_dir, "Ref")):
        FileUtils.makedir(os.path.join(output_dir, "Ref"))
    if not os.path.isdir(os.path.join(output_dir, "Ref", "loc")):
        FileUtils.makedir(os.path.join(output_dir, "Ref", "loc"))
    if not os.path.isdir(os.path.join(output_dir, "Output", "SfM")):
        FileUtils.makedir(os.path.join(output_dir, "Output", "SfM"))
    if not os.path.isdir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction")):
        FileUtils.makedir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction"))
    if not os.path.isdir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction",
                         "global")):
        FileUtils.makedir(
            os.path.join(output_dir, "Output", "SfM", "reconstruction",
                         "global"))

    # load every sfm_data.json; beacon data is optional but, when present,
    # the beacon list must be identical across all merged models
    sfmDataList = []
    sfmViewBeaconDataList = []
    sfmBeaconMap = None
    for project in projectList:
        if not os.path.exists(project["sfm_data"]):
            print "cannot find sfm data : " + project["sfm_data"]
            sys.exit()
        with open(project["sfm_data"]) as jsonFile:
            sfmDataList.append(json.load(jsonFile))

        sfmBeaconFile = os.path.join(os.path.dirname(project["sfm_data"]),
                                     "beacon.txt")
        if os.path.exists(sfmBeaconFile):
            print "find beacon.txt for sfm data : " + project["sfm_data"]
            imgBeaconList, beaconMap = iBeaconUtils.readBeaconData(
                sfmBeaconFile)
            sfmViewBeaconDataList.append(imgBeaconList)
            if sfmBeaconMap is None:
                sfmBeaconMap = beaconMap
            else:
                if sfmBeaconMap != beaconMap:
                    print "invalid find beacon.txt for sfm data : " + project[
                        "sfm_data"]
                    print "beacon.txt should be same for all merged sfm_data"
                    sys.exit()
                else:
                    print "valid beacon.txt for sfm data : " + project[
                        "sfm_data"]

    # load the global-coordinate transformation matrix for each project
    AList = []
    for project in projectList:
        AList.append(np.loadtxt(project["A"]))
        print "load mat : " + project["A"]
        print(np.loadtxt(project["A"]))

    print "Load 3D points"
    pointIdList = []
    pointList = []
    for sfmData in sfmDataList:
        pointId, point = mergeSfM.getAll3DPointloc(sfmData)
        # transpose to a 3xN array (rows are x/y/z, columns are points)
        pointn = np.asarray(point, dtype=np.float).T

        pointIdList.append(pointId)
        pointList.append(pointn)

    # merge models one by one into the first model's (global) frame
    mergeSfmData = None
    mergePointId = None
    mergePointn = None
    mergeSfmViewBeaconData = None
    for idx in range(0, len(sfmDataList)):
        if idx == 0:
            mergeSfmData = sfmDataList[0]
            mergeSfM.transform_sfm_data(mergeSfmData, AList[0])
            if len(sfmViewBeaconDataList) > 0:
                mergeSfmViewBeaconData = sfmViewBeaconDataList[0]
        else:
            ransacThres = mergeSfM.findMedianStructurePointsThres(
                mergeSfmData, reconstructParam.ransacStructureThresMul)
            print "thres to merge 3D points : " + str(ransacThres)

            inlierMap = findInliersByKnownTransform(mergePointId,
                                                    pointIdList[idx],
                                                    mergePointn,
                                                    pointList[idx], AList[idx],
                                                    ransacThres)
            print "number of points in base model : " + str(len(
                mergePointn[0]))
            # pointList entries are 3xN, so count columns (len of a row),
            # not len() of the array itself which is always 3
            print "number of points in model " + str(idx) + " : " + str(
                len(pointList[idx][0]))
            print "number of inliers : " + str(len(inlierMap))
            if len(sfmViewBeaconDataList) > 0:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx],
                                        AList[idx],
                                        {x[0]: x[1]
                                         for x in inlierMap},
                                        mergeSfmViewBeaconData,
                                        sfmViewBeaconDataList[idx])
            else:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx],
                                        AList[idx],
                                        {x[0]: x[1]
                                         for x in inlierMap})

        # refresh merged point cloud for the next iteration's inlier search
        mergePointId, mergePoint = mergeSfM.getAll3DPointloc(mergeSfmData)
        mergePointn = np.asarray(mergePoint, dtype=np.float).T

    # go back to coordinate of the first model (apply inverse of A[0])
    _invA = np.linalg.inv(AList[0][0:3, 0:3])
    invA = np.c_[_invA, -np.dot(_invA, AList[0][:, 3])]
    mergeSfM.transform_sfm_data(mergeSfmData, invA)

    mergeSfmData["root_path"] = os.path.join(output_dir, "Input", "inputImg")

    resultSfMDataFile = os.path.join(output_dir, "Output", "SfM",
                                     "reconstruction", "global",
                                     "sfm_data.json")

    with open(os.path.join(resultSfMDataFile), "w") as jsonfile:
        json.dump(mergeSfmData, jsonfile)

    # export merged beacon data next to the merged sfm_data, if any was loaded
    if mergeSfmViewBeaconData is not None:
        mergeSfmViewBeaconDataMapList = []
        for key in mergeSfmViewBeaconData:
            mergeSfmViewBeaconDataMap = {}
            mergeSfmViewBeaconDataMap[key] = mergeSfmViewBeaconData[key]
            mergeSfmViewBeaconDataMapList.append(mergeSfmViewBeaconDataMap)
        iBeaconUtils.exportBeaconData(
            len(mergeSfmData["views"]), sfmBeaconMap,
            mergeSfmViewBeaconDataMapList,
            os.path.join(os.path.dirname(resultSfMDataFile), "beacon.txt"))
    '''
    os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
    '''

    # save the first model's global transform in both Numpy and OpenCV formats
    Amat = AList[0]
    with open(os.path.join(output_dir, "Ref", "Amat.txt"), "w") as AmatFile:
        np.savetxt(AmatFile, Amat)
    FileUtils.convertNumpyMatTxt2OpenCvMatYml(
        os.path.join(output_dir, "Ref", "Amat.txt"),
        os.path.join(output_dir, "Ref", "Amat.yml"), "A")

    # To create same directory structure before merging, create sfm_data.json without structure information in matches directory
    with open(resultSfMDataFile) as fpr:
        sfmData = json.load(fpr)
        sfmData["extrinsics"] = []
        sfmData["control_points"] = []
        sfmData["structure"] = []
        with open(
                os.path.join(output_dir, "Output", "matches", "sfm_data.json"),
                "w") as fpw:
            json.dump(sfmData, fpw)

    # train BoW vocabulary / PCA over the merged model
    print "Execute : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml")
    os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml"))

    os.system("openMVG_main_ComputeSfM_DataColor -i " + resultSfMDataFile + \
              " -o " + os.path.join(output_dir,"Output","SfM","reconstruction","global","colorized.ply"))
def main():
    """Merge multiple SfM output models into one SfM model.

    Reads an input CSV where each row describes one OpenMVG project to
    merge: project directory, path to its sfm_data.json, and path to a
    matrix "A" (Numpy text format) that transforms the model to global
    coordinates.  All models are merged into the first model's frame,
    bundle-adjusted, transformed back to the first model's local
    coordinates, and the merged sfm_data.json, optional beacon data,
    Amat files, and BoW/PCA training outputs are written to output_dir.
    """
    description = 'This script is for merging multiple SfM output models to one SfM model.' + \
                'Please prepare multiple OpenMVG projects which have output SfM models, and matrix to convert to global coordinate.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_csv', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Input CSV file which lists OpenMVG projects which will be merged.')
    parser.add_argument('output_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Output directory path where merged model will be saved.')
    args = parser.parse_args()
    input_csv = args.input_csv
    output_dir = args.output_dir
        
    # load reconstruct parameters
    reconstructParam = ReconstructParam.ReconstructParam
    
    # read projects list: each row is (project dir, sfm_data path, A matrix path)
    projectList = []
    with open(input_csv, "r") as f:
        reader = csv.reader(f)
        for row in reader:
            project = {}
            project["dir"]  = row[0]
            project["sfm_data"]  = row[1]
            project["A"] = row[2]
            projectList.append(project)
    
    # copy source files to output directory
    for project in projectList:
        copyOriginalFiles(project["dir"], output_dir)
    
    # prepare output directory
    if not os.path.isdir(os.path.join(output_dir,"Ref")):
        FileUtils.makedir(os.path.join(output_dir,"Ref"))
    if not os.path.isdir(os.path.join(output_dir,"Ref","loc")):
        FileUtils.makedir(os.path.join(output_dir,"Ref","loc"))
    if not os.path.isdir(os.path.join(output_dir,"Output","SfM")):
        FileUtils.makedir(os.path.join(output_dir,"Output","SfM"))
    if not os.path.isdir(os.path.join(output_dir,"Output","SfM","reconstruction")):
        FileUtils.makedir(os.path.join(output_dir,"Output","SfM","reconstruction"))
    if not os.path.isdir(os.path.join(output_dir,"Output","SfM","reconstruction","global")):
        FileUtils.makedir(os.path.join(output_dir,"Output","SfM","reconstruction","global"))
    
    # load every sfm_data.json; beacon data is optional but, when present,
    # the beacon list must be identical across all merged models
    sfmDataList = []
    sfmViewBeaconDataList = []
    sfmBeaconMap = None
    for project in projectList:
        if not os.path.exists(project["sfm_data"]):
            print "cannot find sfm data : " + project["sfm_data"]
            sys.exit()
        with open(project["sfm_data"]) as jsonFile:
            sfmDataList.append(json.load(jsonFile))
        
        sfmBeaconFile = os.path.join(os.path.dirname(project["sfm_data"]), "beacon.txt")
        if os.path.exists(sfmBeaconFile):
            print "find beacon.txt for sfm data : " + project["sfm_data"]
            imgBeaconList, beaconMap = iBeaconUtils.readBeaconData(sfmBeaconFile)
            sfmViewBeaconDataList.append(imgBeaconList)
            if sfmBeaconMap is None:
                sfmBeaconMap = beaconMap
            else:
                if sfmBeaconMap!=beaconMap:
                    print "invalid find beacon.txt for sfm data : " + project["sfm_data"]
                    print "beacon.txt should be same for all merged sfm_data"
                    sys.exit()
                else:
                    print "valid beacon.txt for sfm data : " + project["sfm_data"]
    
    # load the global-coordinate transformation matrix for each project
    AList = []
    for project in projectList:
        AList.append(np.loadtxt(project["A"]))
        print "load mat : " + project["A"]
        print (np.loadtxt(project["A"]))
    
    print "Load 3D points"
    pointIdList = []
    pointList = []
    for sfmData in sfmDataList:
        pointId, point = mergeSfM.getAll3DPointloc(sfmData)
        # transpose to a 3xN array (rows are x/y/z, columns are points)
        pointn = np.asarray(point, dtype=np.float).T
        
        pointIdList.append(pointId)
        pointList.append(pointn)
    
    # merge models one by one into the first model's (global) frame
    mergeSfmData = None
    mergePointId = None
    mergePointn = None
    mergeSfmViewBeaconData = None
    for idx in range(0, len(sfmDataList)):
        if idx==0:
            mergeSfmData = sfmDataList[0]
            mergeSfM.transform_sfm_data(mergeSfmData, AList[0])
            if len(sfmViewBeaconDataList)>0:
                mergeSfmViewBeaconData = sfmViewBeaconDataList[0]
        else:
            mergePointThres = mergeSfM.findMedianStructurePointsThres(mergeSfmData, reconstructParam.mergePointThresMul)
            print "thres to merge 3D points : " + str(mergePointThres)
            
            inlierMap = findInliersByKnownTransform(mergePointId, pointIdList[idx], mergePointn, pointList[idx], AList[idx], mergePointThres)
            print "number of points in base model : " + str(len(mergePointn[0]))
            # pointList entries are 3xN, so count columns (len of a row),
            # not len() of the array itself which is always 3
            print "number of points in model " + str(idx) + " : " + str(len(pointList[idx][0]))
            print "number of inliers : " + str(len(inlierMap))
            if len(sfmViewBeaconDataList)>0:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx], AList[idx], {x[0]: x[1] for x in inlierMap}, mergeSfmViewBeaconData, sfmViewBeaconDataList[idx])
            else:
                mergeSfM.merge_sfm_data(mergeSfmData, sfmDataList[idx], AList[idx], {x[0]: x[1] for x in inlierMap})
        
        # refresh merged point cloud for the next iteration's inlier search
        mergePointId, mergePoint = mergeSfM.getAll3DPointloc(mergeSfmData)
        mergePointn = np.asarray(mergePoint, dtype=np.float).T
    
    # go back to coordinate of the first model (apply inverse of A[0])
    _invA = np.linalg.inv(AList[0][0:3,0:3])
    invA = np.c_[_invA, -np.dot(_invA,AList[0][:,3])]
    mergeSfM.transform_sfm_data(mergeSfmData, invA)
    
    mergeSfmData["root_path"] = os.path.join(output_dir,"Input","inputImg")
    
    resultSfMDataFile = os.path.join(output_dir,"Output","SfM","reconstruction","global","sfm_data.json")
    
    with open(os.path.join(resultSfMDataFile),"w") as jsonfile:
        json.dump(mergeSfmData, jsonfile)
    
    # export merged beacon data next to the merged sfm_data, if any was loaded
    if mergeSfmViewBeaconData is not None:
        mergeSfmViewBeaconDataMapList = []
        for key in mergeSfmViewBeaconData:
            mergeSfmViewBeaconDataMap = {}
            mergeSfmViewBeaconDataMap[key] = mergeSfmViewBeaconData[key]
            mergeSfmViewBeaconDataMapList.append(mergeSfmViewBeaconDataMap)
        iBeaconUtils.exportBeaconData(len(mergeSfmData["views"]), sfmBeaconMap, mergeSfmViewBeaconDataMapList, 
                                      os.path.join(os.path.dirname(resultSfMDataFile), "beacon.txt"))
    
    '''
    os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
    '''
    # bundle adjust the merged model in place
    os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile + \
              " -c=" + "rst,rsti" + " -r=" + "1")
    
    # save the first model's global transform in both Numpy and OpenCV formats
    Amat = AList[0]
    with open(os.path.join(output_dir,"Ref","Amat.txt"),"w") as AmatFile:
        np.savetxt(AmatFile,Amat)
    FileUtils.convertNumpyMatTxt2OpenCvMatYml(os.path.join(output_dir,"Ref","Amat.txt"), os.path.join(output_dir,"Ref","Amat.yml"), "A")
    
    # To create same directory structure before merging, create sfm_data.json without structure information in matches directory
    with open(resultSfMDataFile) as fpr:
        sfmData = json.load(fpr)
        sfmData["extrinsics"] = []
        sfmData["control_points"] = []
        sfmData["structure"] = []
        with open(os.path.join(output_dir,"Output","matches","sfm_data.json"),"w") as fpw:
            json.dump(sfmData, fpw)
    
    # train BoW vocabulary / PCA over the merged model
    print "Execute : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml")
    os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + os.path.join(output_dir,"Output") + " " + \
              os.path.join(output_dir,"Output", "matches", "BOWfile.yml") + " -p=" + os.path.join(output_dir,"Output", "matches", "PCAfile.yml"))
    
    os.system("openMVG_main_ComputeSfM_DataColor -i " + resultSfMDataFile + \
              " -o " + os.path.join(output_dir,"Output","SfM","reconstruction","global","colorized.ply"))
def main():
    """Evaluate an exported posenet model on beacon data.

    Loads a frozen TensorFlow graph (.pb), runs position/rotation
    inference for every beacon sample in the input dataset, computes
    positional (meters) and rotational (degrees) errors against ground
    truth, then writes a summary log and a cumulative error histogram
    to the result log directory.
    """
    description = 'This script is for testing posenet'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_txt_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path of input txt file in Cambridge Visual Landmark Dataset format.')
    parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where beacon setting file is saved.')
    parser.add_argument('input_pb_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path of model pb file.')
    parser.add_argument('result_log_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where localization result files are saved.')
    parser.add_argument('-f', '--use_fixed_input_mean_std', action='store_true', default=False, \
                        help='Use fixed input mean and std (default: False)')
    args = parser.parse_args()
    input_txt_file = args.input_txt_file
    input_beacon_setting_file = args.input_beacon_setting_file
    input_pb_file = args.input_pb_file
    result_log_dir = args.result_log_dir
    use_fixed_input_mean_std = args.use_fixed_input_mean_std

    # the mean beacon file is expected next to the model pb file; it is
    # only needed when inputs are normalized by the dataset mean
    input_model_dir = os.path.dirname(input_pb_file)
    input_numpy_mean_beacon_file = os.path.join(input_model_dir,
                                                "mean_beacon.npy")
    if use_fixed_input_mean_std:
        input_numpy_mean_beacon = None
    else:
        input_numpy_mean_beacon = np.load(input_numpy_mean_beacon_file)
    output_summary_log_file = os.path.join(result_log_dir, "summary-log.txt")
    output_hist_log_file = os.path.join(result_log_dir, "hist-log.txt")

    # parse beacon setting file
    beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
    beacon_num = len(beaconmap.keys())

    beacons = tf.placeholder(tf.float32, [1, beacon_num, 1, 1])
    datasource = get_data(input_txt_file, beaconmap, input_numpy_mean_beacon,
                          use_fixed_input_mean_std)
    # per-sample [position error (m), rotation error (deg)]
    results = np.zeros((len(datasource.beacons), 2))

    # Set GPU options
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)

    time_array = np.array([])

    # Load frozen model and resolve input/output operations by name
    graph = load_graph(input_pb_file)
    input_beacon_name = "import/" + input_beacon_layer_name
    output_pos_name = "import/" + output_pos_layer_name
    output_rot_name = "import/" + output_rot_layer_name
    input_operation = graph.get_operation_by_name(input_beacon_name)
    output_pos_operation = graph.get_operation_by_name(output_pos_name)
    output_rot_operation = graph.get_operation_by_name(output_rot_name)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options),
                    graph=graph) as sess:
        for i in range(len(datasource.beacons)):
            np_beacon = datasource.beacons[i]
            feed = {input_operation.outputs[0]: np_beacon}

            # ground truth pose: [0:3] position, [3:7] quaternion
            pose_q = np.asarray(datasource.poses[i][3:7])
            pose_x = np.asarray(datasource.poses[i][0:3])
            start_time = time.time()
            predicted_x, predicted_q = sess.run([
                output_pos_operation.outputs[0],
                output_rot_operation.outputs[0]
            ],
                                                feed_dict=feed)
            elapsed_time = time.time() - start_time
            time_array = np.append(time_array, elapsed_time)

            pose_q = np.squeeze(pose_q)
            pose_x = np.squeeze(pose_x)
            predicted_q = np.squeeze(predicted_q)
            predicted_x = np.squeeze(predicted_x)

            # rotation error: angle between unit quaternions,
            # theta = 2*arccos(|<q1,q2>|)
            q1 = pose_q / np.linalg.norm(pose_q)
            q2 = predicted_q / np.linalg.norm(predicted_q)
            d = abs(np.sum(np.multiply(q1, q2)))
            # clamp to valid arccos domain; d >= 0 already holds from abs(),
            # so only the upper bound needs fixing for float inaccuracy
            if d > 1.0:
                d = 1.0
            theta = 2 * np.arccos(d) * 180 / math.pi
            error_x = np.linalg.norm(pose_x - predicted_x)
            results[i, :] = [error_x, theta]
            print 'Index=', i, ' , Pos Error(m)=', error_x, ',  Rot Error(degrees)=', theta

    # write histgram results: cumulative ratio of position error
    # in 1 cm bins up to 10 m
    bin_edge = [0.01 * float(x) for x in range(0, 1001)]
    dist_errors = results[:, 0]
    dist_hist, dist_hist_bins = np.histogram(dist_errors, bins=bin_edge)
    dist_hist_cum_ratio = np.cumsum(dist_hist) / float(len(datasource.beacons))
    print "Histogram of error: " + str(dist_hist)
    print "Cumulative ratio of error: " + str(dist_hist_cum_ratio)
    print "Total loc err larger than " + str(
        np.max(bin_edge)) + " meters: " + str(
            len(datasource.beacons) - np.sum(dist_hist))

    # write summary of results
    mean_result = np.mean(results, axis=0)
    std_result = np.std(results, axis=0)
    median_result = np.median(results, axis=0)
    max_result = np.max(results, axis=0)
    percentile_80_result = np.percentile(results, 80, axis=0)
    percentile_90_result = np.percentile(results, 90, axis=0)
    percentile_95_result = np.percentile(results, 95, axis=0)
    print 'Mean error ', mean_result[0], 'm  and ', mean_result[1], 'degrees.'
    print 'StdDev error ', std_result[0], 'm  and ', std_result[1], 'degrees.'
    print 'Median error ', median_result[0], 'm  and ', median_result[
        1], 'degrees.'
    print 'Max error ', max_result[0], 'm  and ', max_result[1], 'degrees.'
    print '80 percentile error ', percentile_80_result[
        0], 'm  and ', percentile_80_result[1], 'degrees.'
    print '90 percentile error ', percentile_90_result[
        0], 'm  and ', percentile_90_result[1], 'degrees.'
    print '95 percentile error ', percentile_95_result[
        0], 'm  and ', percentile_95_result[1], 'degrees.'
    print 'Mean time ', str(np.average(time_array))
    print 'StdDev time ', str(np.std(time_array))
    print 'Median time ', str(np.median(time_array))
    with open(output_summary_log_file, "w") as fw:
        fw.write("Number of test image = " + str(len(datasource.beacons)) +
                 "\n")
        fw.write("Mean error = " + str(mean_result[0]) + " meters." + "\n")
        fw.write("StdDev error = " + str(std_result[0]) + " meters." + "\n")
        fw.write("Median error = " + str(median_result[0]) + " meters." + "\n")
        fw.write("Max error = " + str(max_result[0]) + " meters." + "\n")
        fw.write("80 percentile error = " + str(percentile_80_result[0]) +
                 " meters." + "\n")
        fw.write("90 percentile error = " + str(percentile_90_result[0]) +
                 " meters." + "\n")
        fw.write("95 percentile error = " + str(percentile_95_result[0]) +
                 " meters." + "\n")
        fw.write("\n")
        fw.write("Mean error = " + str(mean_result[1]) + " degrees." + "\n")
        fw.write("StdDev error = " + str(std_result[1]) + " degrees." + "\n")
        fw.write("Median error = " + str(median_result[1]) + " degrees." +
                 "\n")
        fw.write("Max error = " + str(max_result[1]) + " degrees." + "\n")
        fw.write("80 percentile error = " + str(percentile_80_result[1]) +
                 " degrees." + "\n")
        fw.write("90 percentile error = " + str(percentile_90_result[1]) +
                 " degrees." + "\n")
        fw.write("95 percentile error = " + str(percentile_95_result[1]) +
                 " degrees." + "\n")
        fw.write("\n")
        fw.write("Histogram of error: " + str(dist_hist) + "\n")
        fw.write("Cumulative ratio: " + str(
            np.around(
                np.cumsum(dist_hist, dtype=float) /
                len(datasource.beacons), 2)) + "\n")
        fw.write("Total loc err larger than " + str(np.max(bin_edge)) +
                 " meters: " +
                 str(len(datasource.beacons) - np.sum(dist_hist)) + "\n")
        fw.write("\n")
        fw.write("Mean time = " + str(np.average(time_array)) + "\n")
        fw.write("StdDev time = " + str(np.std(time_array)) + "\n")
        fw.write("Median time = " + str(np.median(time_array)) + "\n")
    # write error histgram (previously this identical savetxt call was
    # duplicated; writing once is sufficient)
    np.savetxt(output_hist_log_file,
               zip(dist_hist_bins, dist_hist_cum_ratio),
               delimiter=',')
def main():
    """Reconstruct a 3D model from each video under <project>/Input and merge them.

    Per-video pipeline (OpenMVG command-line tools driven via os.system):
      1. image listing, 1.1 intrinsic fix-up (inject a pinhole intrinsic when
         none was detected), 2. feature extraction/matching (iBeacon signal
         data exported alongside), 3. global SfM (retried up to
         reconstructParam.rerunRecon times), 4. pointcloud coloring plus
         cutting of scale "jumps", 5. cleaning of unused views,
      6. re-queueing unused frames as new pseudo-videos appended to the
         reconstruction queue.
    After all videos are processed: optionally train a BOW model, then merge
    all per-video models through sfmMergeGraphIBeacon and save the largest
    merged model.

    Command line: project_path (positional), -k/--path-camera-file,
    --bow. Exits via sys.exit() on an invalid camera matrix file.
    """
    # set parameter
    reconstructParam = ReconstructParam.ReconstructParam
    reconstructIBeaconParam = ReconstructIBeaconParam.ReconstructIBeaconParam
    reconstructBOWParam = ReconstructBOWParam.ReconstructBOWParam

    # parse parameters
    description = 'This script is for reconstruct 3D models from multiple videos and merge to one 3D model. ' + \
                'iBeacon is used for accelerating 3D model merge. ' + \
                'Please prepare multiple videos in Input folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_path', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where your 3D model project is stored.')
    parser.add_argument('-k', '--path-camera-file', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where camera matrix is stored in Numpy text format. (default: focal length ' + \
                            str(reconstructParam.focalLength) + ' will be used)')    
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate 3D model merge if this flag is set (default: False)')    
    args = parser.parse_args()
    PROJECT_PATH = args.project_path
    USE_BOW = args.bow
    PATH_CAMERA_FILE = args.path_camera_file
    
    # Optional 3x3 camera matrix file: only K[0][0] (the focal length) is
    # actually consumed; the rest of K is validated for shape only.
    if PATH_CAMERA_FILE:
        if os.path.exists(PATH_CAMERA_FILE):
            with open(PATH_CAMERA_FILE,"r") as camMatFile:
                K = np.loadtxt(camMatFile)
            if K.shape[0]!=3 or K.shape[1]!=3:
                print "Error : invalid camera matrix size = " + str(K)
                sys.exit()
            print "Focal length " + str(K[0][0]) + " is set for reconstruction"
            reconstructParam.focalLength = K[0][0]
        else:
            print "Error : invalid camera matrix file = " + PATH_CAMERA_FILE
            sys.exit()
    
    # get paths
    inputPath = os.path.join(PROJECT_PATH, "Input")
    outputPath = os.path.join(PROJECT_PATH, "Output")
    
    FileUtils.makedir(outputPath)
    
    # reconstruct all videos
    # NOTE: listVideo is intentionally appended to inside this loop (step 6
    # below) so that newly extracted "unused frame" pseudo-videos are also
    # reconstructed in later iterations.
    listVideo = sorted(os.listdir(inputPath))
    for video in listVideo:
        if not os.path.isdir(os.path.join(inputPath, video)):
            continue
        
        print "Begin reconstructing video : " + video
        
        # per-video working directories
        sfm_mainDir = os.path.join(outputPath, video)
        sfm_inputDir = os.path.join(inputPath, video)
        sfm_inputImgDir = os.path.join(sfm_inputDir, "inputImg")
        sfm_matchesDir = os.path.join(sfm_mainDir, "matches")
        sfm_sfmDir = os.path.join(sfm_mainDir, "SfM")
        sfm_reconstructDir = os.path.join(sfm_sfmDir, "reconstruction")
        sfm_globalDir = os.path.join(sfm_reconstructDir, "global")
                    
        FileUtils.makedir(sfm_mainDir)
        FileUtils.makedir(sfm_inputImgDir)
        FileUtils.makedir(sfm_matchesDir)
        FileUtils.makedir(sfm_sfmDir)
        FileUtils.makedir(sfm_reconstructDir)
        FileUtils.makedir(sfm_globalDir)
        
        # skip videos already reconstructed (sfm_data.json present)
        if not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")):
            # list images
            os.system("openMVG_main_SfMInit_ImageListing -i " + sfm_inputImgDir + " -o " + sfm_matchesDir + " -d " + reconstructParam.CAMERA_DATABASE_PATH)
            
            # 1.1 Check intrinsic
            # ( if camera parameter not specified then replace with fixed camera.
            # and set appropriate width and height)
            with open(os.path.join(sfm_matchesDir, "sfm_data.json")) as sfm_data_file:
                sfm_data = json.load(sfm_data_file)
                hImg = sfm_data["views"][0]['value']['ptr_wrapper']['data']["height"]
                wImg = sfm_data["views"][0]['value']['ptr_wrapper']['data']["width"]
                if len(sfm_data["intrinsics"]) == 0:
                    # point every view at the single injected intrinsic
                    for view in sfm_data["views"]:
                        view["value"]["ptr_wrapper"]["data"]["id_intrinsic"] = 0;
                        
                    # build a pinhole intrinsic entry in OpenMVG's cereal
                    # serialization layout (polymorphic_id / ptr_wrapper ids
                    # are the magic values OpenMVG expects)
                    sfm_data["intrinsics"].append({})
                    sfm_data["intrinsics"][0]["key"] = 0
                    sfm_data["intrinsics"][0]["values"] = {}
                    # sfm_data["intrinsics"][0]["values"]["polymorphic_name"] = "pinhole_radial_k3"
                    sfm_data["intrinsics"][0]["values"]["polymorphic_name"] = "pinhole"
                    sfm_data["intrinsics"][0]["values"]["polymorphic_id"] = 2147999999
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["id"] = 2147483660
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["width"] = wImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["height"] = hImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["focal_length"] = reconstructParam.focalLength
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["disto_k3"] = [0, 0, 0]
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"]["principal_point"] = [wImg / 2, hImg / 2]
                    
            with open(os.path.join(sfm_matchesDir, "sfm_data.json"), "w") as sfm_data_file:
                json.dump(sfm_data, sfm_data_file)
                
            # 2 - Features computation and matching
            # ( Compute per image a list of features & descriptors )
            IBeaconUtils.exportBeaconDataForSfmImageFrames(os.path.join(sfm_inputDir, "csv"), os.path.join(sfm_matchesDir, "sfm_data.json"), 
                                                           os.path.join(inputPath, "listbeacon.txt"), os.path.join(sfm_matchesDir, "beacon.txt"), 
                                                           reconstructIBeaconParam.normApproach)
            
            os.system(reconstructParam.EXTRACT_FEATURE_MATCH_PROJECT_PATH + \
                      " " + sfm_matchesDir + \
                      " -mf=" + str(reconstructParam.maxTrackletMatchDistance) + \
                      " -mm=" + str(reconstructParam.minMatchToRetain) + \
                      " -f=" + str(reconstructParam.extFeatDistRatio) + \
                      " -r=" + str(reconstructParam.extFeatRansacRound))
            
            # OpenMVG assumes matches.e.txt for global reconstruction, matches.f.txt for incremental reconstruction
            os.system("cp " + os.path.join(sfm_matchesDir, "matches.f.txt") + " " + os.path.join(sfm_matchesDir, "matches.e.txt"))
            
            # 3 - Global reconstruction
            # global SfM can fail nondeterministically, so retry up to
            # reconstructParam.rerunRecon times until sfm_data.json appears
            countRecon = 1
            while not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")) and countRecon < reconstructParam.rerunRecon:  
                os.system("openMVG_main_GlobalSfM -i " + os.path.join(sfm_matchesDir, "sfm_data.json") + " -m " + sfm_matchesDir + " -o " + sfm_globalDir)
                # for OpenMVG 1.0
                #os.system("openMVG_main_ConvertSfM_DataFormat -i " + os.path.join(sfm_globalDir, "sfm_data.bin") + " -o " + os.path.join(sfm_globalDir, "sfm_data.json"))
                countRecon = countRecon + 1
                time.sleep(1)
            
            # give up on this video if reconstruction never succeeded
            if not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")):
                continue
                
            # 4 - Color the pointcloud
            os.system("openMVG_main_ComputeSfM_DataColor -i " + os.path.join(sfm_globalDir, "sfm_data.json") + " -o " + os.path.join(sfm_globalDir, "colorized.ply"))
            
            # 4.5 remove part of reconstruction where it is incorrect
            # Specifically, sometimes when their matching is not adequate,
            # the reconstructed model will be divided into two or more models
            # with different scale and a "jump" between pose translation.
            # This function detects such jump and retain the the largest 
            # beginning or ending part of reconstruction, while the rest
            # should be reconstructed separately by cleanSfM.
            countCut = 0
            # keep cutting until no more cut
            while cleanSfM.cutSfMDataJump(os.path.join(sfm_globalDir, "sfm_data.json"), bufferFrame=reconstructParam.bufferFrame):
                countCut = countCut + 1
                # keep each pre-cut snapshot as sfm_data_BC<n>.json for debugging
                os.rename(os.path.join(sfm_globalDir, "sfm_data_BC.json"),
                          os.path.join(sfm_globalDir, "sfm_data_BC" + str(countCut) + ".json"))
                # modified by T. Ishihara 2016.06.14
                # change bundle adjustment parameter for cutted model
                '''                
                os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " -c=" + "rs,rst,rsti")
                '''
                os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " -c=" + "rst,rsti" + " -r=" + "1")
            os.system("openMVG_main_ComputeSfM_DataColor -i " + os.path.join(sfm_globalDir, "sfm_data.json") + " -o " + os.path.join(sfm_globalDir, "colorized_AC.ply"))
            
            # 5 - Clean sfm_data by removing viewID of frames that are not used
            # in reconstruction and put them in another folder and reconstruct them again
            # note that sfm_data.json in matches folder is renamed and kept as reference
            unusedImg = cleanSfM.cleanSfM(os.path.join(sfm_globalDir, "sfm_data.json"),
                                 [os.path.join(sfm_matchesDir, "matches.putative.txt"),
                                  os.path.join(sfm_matchesDir, "matches.e.txt"),
                                  os.path.join(sfm_matchesDir, "matches.f.txt")])
            
            # 6. move unused images, csv files into a new folder unless they have less than x images
            # unusedImg holds (at most) two lists: frames unused at the front
            # and at the back of the video — TODO confirm against cleanSfM
            for i in range(0, len(unusedImg)):
                listUnused = unusedImg[i]
                if len(listUnused) < reconstructParam.minUnusedImgLength:
                    continue
                
                # set name for new video
                if i == 0:
                    newVidName = video + "_front"
                elif i == 1:
                    newVidName = video + "_back"
                else:
                    # this should not be called
                    continue
                
                # set path
                pathNewVid = os.path.join(inputPath, newVidName)
                
                # skip if there is already this folder
                if os.path.isdir(pathNewVid):
                    continue
                
                print "Extract unused part of " + video + " into " + newVidName
                
                FileUtils.makedir(pathNewVid)
                
                csvNewVid = os.path.join(pathNewVid, "csv")
                imgNewVid = os.path.join(pathNewVid, "inputImg")
                FileUtils.makedir(csvNewVid)
                FileUtils.makedir(imgNewVid)
                
                # copy image in list and csv file
                # "cp -s" creates symlinks instead of copying file contents
                os.system("cp -s " + os.path.join(sfm_inputDir, "csv", "*.csv") + " " + csvNewVid)
                for unusedFilename in listUnused:
                    os.system("cp -s " + os.path.join(sfm_inputImgDir, unusedFilename) + " " + imgNewVid)
                
                # append the folder into reconstruction queue
                listVideo.append(newVidName)
    
    # train bag of words model, and extract bag of words feature for all images
    if USE_BOW and not os.path.isfile(os.path.join(outputPath, "merge_result", "Output", "matches", "BOWfile.yml")):
        outputBowPath = os.path.join(outputPath, "merge_result", "Output", "matches")
        if not os.path.isdir(outputBowPath):
            FileUtils.makedir(outputBowPath)
        print "Execute Training BOW : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml")
        os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml"))
    
    # load graph structure from "mergeGraph.txt" if it exists
    # create new graph structure if it does not exist
    if os.path.isfile(os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction", "mergeGraph.txt")):
        sfmGraph = sfmMergeGraphIBeacon.sfmGraphIBeacon.load(os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction", "mergeGraph.txt"))
        sfmGraph.workspacePath = reconstructParam.WORKSPACE_DIR
        
        #### start of manually adding new model code ####
        # In current code, you cannot add new 3D model once you start merging.
        # Enable following commented code to add new 3D model after you already started merging.
        '''
        newModelToAdd = []
        for newModelName in newModelToAdd:
            addModel(newModelName,os.path.join(inputPath,newModelName),os.path.join(outputPath,newModelName))
        sfmGraph.clearBadMatches()
        '''
        ### end of manually adding new model code ###
    else:
        sfmGraph = sfmMergeGraphIBeacon.sfmGraphIBeacon(inputPath,
                                          outputPath,
                                          os.path.join(outputPath, "merge_result", "Input"),
                                          os.path.join(outputPath, "merge_result", "Output", "SfM", "reconstruction"),
                                          os.path.join(outputPath, "merge_result", "Output", "matches"),
                                          os.path.join(outputPath, "merge_result", "Input", "csv"),
                                          os.path.join(outputPath, "merge_result", "Input", "inputImg"),
                                          reconstructParam.WORKSPACE_DIR,
                                          USE_BOW,
                                          validMergeRansacThresK=reconstructParam.vldMergeAgrFrameThresK,                                          
                                          minReconFrame=reconstructParam.minReconFrameToAdd)
    
    # merge all per-video reconstructions; the image_describer of the first
    # video is reused for all models
    sfmGraph.mergeModel(os.path.join(inputPath, "listbeacon.txt"),
                        os.path.join(outputPath, listVideo[0], "matches", "image_describer.txt"),
                        inputPath,
                        outputPath,
                        reconParam=reconstructParam,
                        reconIBeaconParam=reconstructIBeaconParam,
                        reconBOWParam=reconstructBOWParam)
    
    # select largest 3D model and save it
    SfMDataUtils.saveFinalSfM(PROJECT_PATH)
# Esempio n. 13
# 0
def main():
    """Train a combined image+beacon PoseNet model with Keras/TensorFlow.

    Reads a Cambridge-format dataset txt file plus an iBeacon setting file,
    builds the shrink image+beacon PoseNet for the selected base model
    (inception-v1 / inception-v3 / mobilenet-v1), initializes it from the
    given image and beacon weight files, and trains it with Adam, saving
    checkpoints, TensorBoard logs, mean image/beacon arrays, and the final
    trained weights under the output directories.

    Exits via sys.exit() on an invalid base model, a missing initial weight
    file, or when the non-shrink model is requested (unsupported).
    """
    description = 'This script is for testing posenet'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_txt_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path of input txt file in Cambridge Visual Landmark Dataset format.')
    parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where beacon setting file is saved.')
    parser.add_argument('output_model_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where output models will be saved.')
    parser.add_argument('output_log_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where log files will be saved.')
    parser.add_argument('-i', '--input_image_weight_file', action='store', type=str, default=None, \
                        help='File path image posenet weiht is stored in numpy format.')
    parser.add_argument('-w', '--input_beacon_weight_file', action='store', type=str, default=None, \
                        help='File path beacon posenet weiht is stored in numpy format.')
    parser.add_argument('-l', '--loss_beta', action='store', type=float, default=posenet_config.loss_beta, \
                        help='Beta for loss function (Default : ' + str(posenet_config.loss_beta))
    parser.add_argument('-s', '--use_shrink_model', action='store_true', default=False, \
                        help='Use shrink model (default: False)')
    parser.add_argument('-f', '--use_fixed_input_mean_std', action='store_true', default=False, \
                        help='Use fixed image mean and std (default: False)')
    parser.add_argument('-a', '--use_augmentation_beacon', action='store_true', default=False, \
                        help='Use data augmentation for beacon data (default: False)')
    parser.add_argument('-m', '--base_model', action='store', type=str, default=posenet_config.base_model, \
                        help='Base model : inception-v1/inception-v3/mobilenet-v1 (Default : ' + str(posenet_config.base_model))
    parser.add_argument('-e', '--epochs', action='store', type=int, default=posenet_config.epochs, \
                        help='Epochs (Default : ' + str(posenet_config.epochs))
    parser.add_argument('-b', '--batch_size', action='store', type=int, default=posenet_config.batch_size, \
                        help='Batch size (Default : ' + str(posenet_config.batch_size))
    args = parser.parse_args()
    input_txt_file = args.input_txt_file
    input_beacon_setting_file = args.input_beacon_setting_file
    output_model_dir = args.output_model_dir
    output_log_dir = args.output_log_dir
    input_image_weight_file = args.input_image_weight_file
    input_beacon_weight_file = args.input_beacon_weight_file
    # training hyper-parameters are stored on the shared posenet_config
    # module so downstream helpers (data loader, loss) see the same values
    posenet_config.loss_beta = args.loss_beta
    use_shrink_model = args.use_shrink_model
    use_fixed_input_mean_std = args.use_fixed_input_mean_std
    use_augmentation_beacon = args.use_augmentation_beacon
    posenet_config.base_model = args.base_model
    posenet_config.epochs = args.epochs
    posenet_config.batch_size = args.batch_size
    print "base model : " + str(posenet_config.base_model)
    print "epochs : " + str(posenet_config.epochs)
    print "batch size : " + str(posenet_config.batch_size)
    print "loss weight value beta : " + str(posenet_config.loss_beta)
    print "use shrink model for training : " + str(use_shrink_model)
    print "use fixed input mean and std : " + str(use_fixed_input_mean_std)
    print "use beacon data augmentation : " + str(use_augmentation_beacon)
    if posenet_config.base_model != "inception-v1" and posenet_config.base_model != "inception-v3" and posenet_config.base_model != "mobilenet-v1":
        print "invalid base model : " + posenet_config.base_model
        sys.exit()
    if input_image_weight_file is None or input_beacon_weight_file is None:
        print "please specify initial weight for image and beacon"
        sys.exit()

    # parse beacon setting file
    beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
    beacon_num = len(beaconmap.keys())

    # dataset images are expected to live next to the dataset txt file
    input_image_dir = os.path.dirname(input_txt_file)
    output_numpy_mean_image_file = os.path.join(output_model_dir,
                                                "mean_image.npy")
    output_numpy_mean_beacon_file = os.path.join(output_model_dir,
                                                 "mean_beacon.npy")
    output_numpy_model_file = os.path.join(output_model_dir, "model.npy")
    output_model_file = os.path.join(output_model_dir, "model.ckpt")

    # input resolution per base model; only inception-v1 has auxiliary
    # classifier outputs that must be fed during training
    if posenet_config.base_model == "inception-v1":
        image_size = 224
        output_auxiliary = True
    elif posenet_config.base_model == "inception-v3":
        image_size = 299
        output_auxiliary = False
    elif posenet_config.base_model == "mobilenet-v1":
        image_size = 224
        output_auxiliary = False
    else:
        print "invalid base model : " + posenet_config.base_model
        sys.exit()

    datasource, mean_image, mean_beacon = posenet_data_utils.get_image_beacon_data(
        input_txt_file,
        input_image_dir,
        beaconmap,
        beacon_num,
        use_fixed_input_mean_std,
        use_augmentation_beacon,
        image_size=image_size)
    # persist the dataset means for use at inference time (skipped when
    # fixed mean/std normalization was requested)
    if use_fixed_input_mean_std:
        print("Skip save mean image and beacon")
    else:
        with open(output_numpy_mean_image_file, 'wb') as fw:
            np.save(fw, mean_image)
        print("Save mean image at: " + output_numpy_mean_image_file)
        with open(output_numpy_mean_beacon_file, 'wb') as fw:
            np.save(fw, mean_beacon)
        print("Save mean beacon at: " + output_numpy_mean_beacon_file)

    # Set GPU options
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    KTF.set_session(session)

    # Train model
    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=0.00000001)
    if use_shrink_model:
        if posenet_config.base_model == "inception-v1":
            # inception-v1 trains with two auxiliary heads (cls1/cls2) in
            # addition to the final head (cls3)
            model = posenet_image_beacon_no_inception_shrink_keras.create_posenet_inception_v1(
                beacon_num, input_image_weight_file, input_beacon_weight_file)
            model.compile(optimizer=adam,
                          loss={
                              'image_beacon_cls1_fc_pose_xyz':
                              posenet_loss.euc_loss1x,
                              'image_beacon_cls1_fc_pose_wpqr':
                              posenet_loss.euc_loss1q,
                              'image_beacon_cls2_fc_pose_xyz':
                              posenet_loss.euc_loss2x,
                              'image_beacon_cls2_fc_pose_wpqr':
                              posenet_loss.euc_loss2q,
                              'image_beacon_cls3_fc_pose_xyz':
                              posenet_loss.euc_loss3x,
                              'image_beacon_cls3_fc_pose_wpqr':
                              posenet_loss.euc_loss3q
                          })
        elif posenet_config.base_model == "inception-v3":
            model = posenet_image_beacon_no_inception_shrink_keras.create_posenet_inception_v3(
                beacon_num, input_image_weight_file, input_beacon_weight_file)
            model.compile(optimizer=adam,
                          loss={
                              'image_beacon_cls3_fc_pose_xyz':
                              posenet_loss.euc_loss3x,
                              'image_beacon_cls3_fc_pose_wpqr':
                              posenet_loss.euc_loss3q
                          })
        elif posenet_config.base_model == "mobilenet-v1":
            model = posenet_image_beacon_no_inception_shrink_keras.create_posenet_mobilenet_v1(
                beacon_num, input_image_weight_file, input_beacon_weight_file)
            model.compile(optimizer=adam,
                          loss={
                              'image_beacon_cls_fc_pose_xyz':
                              posenet_loss.euc_loss3x,
                              'image_beacon_cls_fc_pose_wpqr':
                              posenet_loss.euc_loss3q
                          })
        else:
            print "invalid base model : " + posenet_config.base_model
            sys.exit()
    else:
        print "Do not shrink model is not supported"
        sys.exit()
    model.summary()

    # Setup checkpointing
    checkpointer = ModelCheckpoint(filepath=os.path.join(
        output_model_dir, "checkpoint_weights.h5"),
                                   verbose=1,
                                   save_weights_only=True,
                                   period=1)

    # Save Tensorboard log
    logger = TensorBoard(log_dir=output_log_dir,
                         histogram_freq=0,
                         write_graph=True)

    # Adjust Epoch size depending on beacon data augmentation
    # (augmentation multiplies samples per epoch, so epochs are divided
    # to keep total iterations comparable; Python 2 integer division)
    if use_augmentation_beacon:
        posenet_config.epochs = posenet_config.epochs / posenet_config.num_beacon_augmentation
    steps_per_epoch = int(
        len(datasource.poses_index) / float(posenet_config.batch_size))
    num_iterations = steps_per_epoch * posenet_config.epochs
    print("Number of epochs : " + str(posenet_config.epochs))
    print("Number of training data : " + str(len(datasource.poses_index)))
    print("Number of iterations : " + str(num_iterations))

    history = model.fit_generator(
        posenet_data_utils.gen_image_beacon_data_batch(
            datasource, output_auxiliary=output_auxiliary),
        steps_per_epoch=steps_per_epoch,
        epochs=posenet_config.epochs,
        callbacks=[checkpointer, logger])

    model.save_weights(os.path.join(output_model_dir, "trained_weights.h5"))
# Esempio n. 14
# 0
def main():
    global base_model

    description = 'This script is for testing posenet'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_txt_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path of input txt file in Cambridge Visual Landmark Dataset format.')
    parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where beacon setting file is saved.')
    parser.add_argument('input_model_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where input model is saved.')
    parser.add_argument('result_log_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where localization result files are saved.')
    parser.add_argument('-s', '--use_shrink_model', action='store_true', default=False, \
                        help='Use shrink model (default: False)')
    parser.add_argument('-f', '--use_fixed_input_mean_std', action='store_true', default=False, \
                        help='Use fixed input mean and std (default: False)')
    parser.add_argument('-m', '--base_model', action='store', type=str, default=base_model, \
                        help='Base model : inception-v1/inception-v3/mobilenet-v1 (Default : ' + str(base_model))
    args = parser.parse_args()
    input_txt_file = args.input_txt_file
    input_beacon_setting_file = args.input_beacon_setting_file
    input_model_dir = args.input_model_dir
    result_log_dir = args.result_log_dir
    use_shrink_model = args.use_shrink_model
    use_fixed_input_mean_std = args.use_fixed_input_mean_std
    base_model = args.base_model
    print "base model : " + str(base_model)
    print "use shrink model for training : " + str(use_shrink_model)
    print "use fixed input mean and std : " + str(use_fixed_input_mean_std)
    if base_model != "inception-v1" and base_model != "inception-v3" and base_model != "mobilenet-v1":
        print "invalid base model : " + base_model
        sys.exit()

    input_image_dir = os.path.dirname(input_txt_file)
    input_numpy_mean_image_file = os.path.join(input_model_dir,
                                               "mean_image.npy")
    if use_fixed_input_mean_std:
        input_numpy_mean_image = None
    else:
        input_numpy_mean_image = np.load(input_numpy_mean_image_file)
    input_numpy_mean_beacon_file = os.path.join(input_model_dir,
                                                "mean_beacon.npy")
    if use_fixed_input_mean_std:
        input_numpy_mean_beacon = None
    else:
        input_numpy_mean_beacon = np.load(input_numpy_mean_beacon_file)
    output_localize_txt_file = os.path.join(result_log_dir,
                                            "localize-poses.txt")
    output_localize_json_file = os.path.join(result_log_dir,
                                             "localize-poses.json")
    output_summary_log_file = os.path.join(result_log_dir, "summary-log.txt")
    output_hist_log_file = os.path.join(result_log_dir, "hist-log.txt")
    output_detail_log_file = os.path.join(result_log_dir, "detail-log.txt")

    # parse beacon setting file
    beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
    beacon_num = len(beaconmap.keys())

    if base_model == "inception-v1":
        image_size = 224
        output_auxiliary = True
    elif base_model == "inception-v3":
        image_size = 299
        output_auxiliary = False
    elif base_model == "mobilenet-v1":
        image_size = 224
        output_auxiliary = False
    else:
        print "invalid base model : " + base_model
        sys.exit()
    datasource, image_filenames = get_data(input_txt_file, input_image_dir,
                                           input_numpy_mean_image, beaconmap,
                                           input_numpy_mean_beacon,
                                           use_fixed_input_mean_std)
    predicted_poses = np.zeros((len(datasource.beacons), 7))
    groundtruth_poses = np.zeros((len(datasource.beacons), 7))
    results = np.zeros((len(datasource.beacons), 2))

    # Set GPU options
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    KTF.set_session(session)

    # load model
    if use_shrink_model:
        if base_model == "inception-v1":
            model = posenet_image_beacon_no_inception_shrink_keras.create_posenet_inception_v1(
                beacon_num)
        elif base_model == "inception-v3":
            model = posenet_image_beacon_no_inception_shrink_keras.create_posenet_inception_v3(
                beacon_num)
        elif base_model == "mobilenet-v1":
            model = posenet_image_beacon_no_inception_shrink_keras.create_posenet_mobilenet_v1(
                beacon_num)
        else:
            print "invalid base model : " + base_model
            sys.exit()
    else:
        print "Do not shrink model is not supported"
        sys.exit()
    model.load_weights(os.path.join(input_model_dir, 'trained_weights.h5'))
    model.summary()

    time_array = np.array([])

    for i in range(len(datasource.image_filenames)):
        np_images = posenet_image_utils.preprocess_test_image(
            datasource.image_filenames[i],
            input_numpy_mean_image,
            use_fixed_input_mean_std,
            1,
            False,
            image_size=image_size)
        np_beacon = datasource.beacons[i]

        pose_q = np.asarray(datasource.poses[i][3:7])
        pose_x = np.asarray(datasource.poses[i][0:3])
        start_time = time.time()
        sample_predicted = model.predict([np_images, np_beacon])
        if output_auxiliary:
            # 0-3 are results for auxiliary outputs
            sample_predicted_x = sample_predicted[4]
            sample_predicted_q = sample_predicted[5]
        else:
            sample_predicted_x = sample_predicted[0]
            sample_predicted_q = sample_predicted[1]
        elapsed_time = time.time() - start_time
        time_array = np.append(time_array, elapsed_time)

        pose_q = np.squeeze(pose_q)
        pose_x = np.squeeze(pose_x)
        predicted_q = np.squeeze(sample_predicted_q)
        predicted_x = np.squeeze(sample_predicted_x)

        predicted_poses[i, 0] = predicted_x[0]
        predicted_poses[i, 1] = predicted_x[1]
        predicted_poses[i, 2] = predicted_x[2]
        predicted_poses[i, 3] = predicted_q[0]
        predicted_poses[i, 4] = predicted_q[1]
        predicted_poses[i, 5] = predicted_q[2]
        predicted_poses[i, 6] = predicted_q[3]

        groundtruth_poses[i, 0] = pose_x[0]
        groundtruth_poses[i, 1] = pose_x[1]
        groundtruth_poses[i, 2] = pose_x[2]
        groundtruth_poses[i, 3] = pose_q[0]
        groundtruth_poses[i, 4] = pose_q[1]
        groundtruth_poses[i, 5] = pose_q[2]
        groundtruth_poses[i, 6] = pose_q[3]

        # calculate error
        q1 = pose_q / np.linalg.norm(pose_q)
        q2 = predicted_q / np.linalg.norm(predicted_q)
        d = abs(np.sum(np.multiply(q1, q2)))
        # fix floating point inaccuracy
        if d < -1.0:
            d = -1.0
        if d > 1.0:
            d = 1.0
        theta = 2 * np.arccos(d) * 180 / math.pi
        error_x = np.linalg.norm(pose_x - predicted_x)
        results[i, :] = [error_x, theta]
        print 'Index=', i, ' , Pos Error(m)=', error_x, ',  Rot Error(degrees)=', theta

    # write localize poses
    with open(output_localize_txt_file, "w") as fw:
        fw.write("Localization Data V1\n")
        fw.write("ImageFile, Camera Position [X Y Z W P Q R]\n")
        fw.write("\n")
        for idx in range(len(datasource.beacons)):
            fw.write(
                str(idx) + " " +
                ' '.join(['%f' % p for p in predicted_poses[idx, :]]) + "\n")
    locJsonObj = {}
    locJsonObj["locGlobal"] = []
    for idx, pose in enumerate(predicted_poses):
        Rh = transformations.quaternion_matrix(
            [pose[3], pose[4], pose[5], pose[6]])
        groundtruth_pose = groundtruth_poses[idx]
        groundtruth_Rh = transformations.quaternion_matrix([
            groundtruth_pose[3], groundtruth_pose[4], groundtruth_pose[5],
            groundtruth_pose[6]
        ])

        jsonLoc = {}
        jsonLoc["beacon_idx"] = idx
        jsonLoc["t"] = [pose[0], pose[1], pose[2]]
        jsonLoc["R"] = Rh[0:3, 0:3].tolist()
        jsonLoc["groundtruth"] = [
            groundtruth_pose[0], groundtruth_pose[1], groundtruth_pose[2]
        ]
        jsonLoc["groundtruthR"] = groundtruth_Rh[0:3, 0:3].tolist()
        locJsonObj["locGlobal"].append(jsonLoc)
    with open(output_localize_json_file, "w") as fw:
        json.dump(locJsonObj, fw)

    # write histgram results
    bin_edge = [0.01 * float(x) for x in range(0, 1001)]
    dist_errors = results[:, 0]
    dist_hist, dist_hist_bins = np.histogram(dist_errors, bins=bin_edge)
    dist_hist_cum_ratio = np.cumsum(dist_hist) / float(len(datasource.beacons))
    print "Histogram of error: " + str(dist_hist)
    print "Cumulative ratio of error: " + str(dist_hist_cum_ratio)
    print "Total loc err larger than " + str(
        np.max(bin_edge)) + " meters: " + str(
            len(datasource.beacons) - np.sum(dist_hist))

    # write summary of results
    mean_result = np.mean(results, axis=0)
    std_result = np.std(results, axis=0)
    median_result = np.median(results, axis=0)
    max_result = np.max(results, axis=0)
    percentile_80_result = np.percentile(results, 80, axis=0)
    percentile_90_result = np.percentile(results, 90, axis=0)
    percentile_95_result = np.percentile(results, 95, axis=0)
    print 'Mean error ', mean_result[0], 'm  and ', mean_result[1], 'degrees.'
    print 'StdDev error ', std_result[0], 'm  and ', std_result[1], 'degrees.'
    print 'Median error ', median_result[0], 'm  and ', median_result[
        1], 'degrees.'
    print 'Max error ', max_result[0], 'm  and ', max_result[1], 'degrees.'
    print '80 percentile error ', percentile_80_result[
        0], 'm  and ', percentile_80_result[1], 'degrees.'
    print '90 percentile error ', percentile_90_result[
        0], 'm  and ', percentile_90_result[1], 'degrees.'
    print '95 percentile error ', percentile_95_result[
        0], 'm  and ', percentile_95_result[1], 'degrees.'
    print 'Mean time ', str(np.average(time_array))
    print 'StdDev time ', str(np.std(time_array))
    print 'Median time ', str(np.median(time_array))
    with open(output_summary_log_file, "w") as fw:
        fw.write("Number of test image = " + str(len(datasource.beacons)) +
                 "\n")
        fw.write("Mean error = " + str(mean_result[0]) + " meters." + "\n")
        fw.write("StdDev error = " + str(std_result[0]) + " meters." + "\n")
        fw.write("Median error = " + str(median_result[0]) + " meters." + "\n")
        fw.write("Max error = " + str(max_result[0]) + " meters." + "\n")
        fw.write("80 percentile error = " + str(percentile_80_result[0]) +
                 " meters." + "\n")
        fw.write("90 percentile error = " + str(percentile_90_result[0]) +
                 " meters." + "\n")
        fw.write("95 percentile error = " + str(percentile_95_result[0]) +
                 " meters." + "\n")
        fw.write("\n")
        fw.write("Mean error = " + str(mean_result[1]) + " degrees." + "\n")
        fw.write("StdDev error = " + str(std_result[1]) + " degrees." + "\n")
        fw.write("Median error = " + str(median_result[1]) + " degrees." +
                 "\n")
        fw.write("Max error = " + str(max_result[1]) + " degrees." + "\n")
        fw.write("80 percentile error = " + str(percentile_80_result[1]) +
                 " degrees." + "\n")
        fw.write("90 percentile error = " + str(percentile_90_result[1]) +
                 " degrees." + "\n")
        fw.write("95 percentile error = " + str(percentile_95_result[1]) +
                 " degrees." + "\n")
        fw.write("\n")
        fw.write("Histogram of error: " + str(dist_hist) + "\n")
        fw.write("Cumulative ratio: " + str(
            np.around(
                np.cumsum(dist_hist, dtype=float) /
                len(datasource.beacons), 2)) + "\n")
        fw.write("Total loc err larger than " + str(np.max(bin_edge)) +
                 " meters: " +
                 str(len(datasource.beacons) - np.sum(dist_hist)) + "\n")
        fw.write("\n")
        fw.write("Mean time = " + str(np.average(time_array)) + "\n")
        fw.write("StdDev time = " + str(np.std(time_array)) + "\n")
        fw.write("Median time = " + str(np.median(time_array)) + "\n")
    # write error histgram
    np.savetxt(output_hist_log_file,
               zip(dist_hist_bins, dist_hist_cum_ratio),
               delimiter=',')
    # write error histgram
    np.savetxt(output_hist_log_file,
               zip(dist_hist_bins, dist_hist_cum_ratio),
               delimiter=',')

    # write detail results
    with open(output_detail_log_file, "w") as fw:
        for idx in range(len(datasource.beacons)):
            fw.write(
                str(idx) + "," + str(results[idx, 0]) + "," +
                str(results[idx, 1]) + "\n")
Example n. 15
0
def main():
        global output_pos_layer_name
        global output_rot_layer_name
        
        description = 'This script is for testing posenet'
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument('input_txt_file', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='File path of input txt file in Cambridge Visual Landmark Dataset format.')
        parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='File path where beacon setting file is saved.')
        parser.add_argument('input_pb_file', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='File path of model pb file.')
        parser.add_argument('result_log_dir', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='Directory path where localization result files are saved.')
        parser.add_argument('-f', '--use_fixed_input_mean_std', action='store_true', default=False, \
                            help='Use fixed input mean and std (default: False)')
        parser.add_argument('-m', '--base_model', action='store', type=str, default=posenet_config.base_model, \
                            help='Base model : inception-v1/inception-v3/mobilenet-v1 (Default : ' + str(posenet_config.base_model))
        args = parser.parse_args()
        input_txt_file = args.input_txt_file
        input_beacon_setting_file = args.input_beacon_setting_file
        input_pb_file = args.input_pb_file
        result_log_dir = args.result_log_dir
        use_fixed_input_mean_std = args.use_fixed_input_mean_std
        posenet_config.base_model = args.base_model
        print "base model : " + str(posenet_config.base_model)

        # you can check output node name by tensorflow/tools/graph_transforms::summarize_graph
        # https://github.com/tensorflow/models/tree/master/research/slim#Export
        if posenet_config.base_model=="inception-v1":
            output_pos_layer_name = "image_beacon_cls3_fc_pose_xyz/BiasAdd"
            output_rot_layer_name = "image_beacon_cls3_fc_pose_wpqr/BiasAdd"
        elif posenet_config.base_model=="inception-v3" or posenet_config.base_model=="mobilenet-v1":
            output_pos_layer_name = "image_beacon_cls_fc_pose_xyz/BiasAdd"
            output_rot_layer_name = "image_beacon_cls_fc_pose_wpqr/BiasAdd"
        else:
            print "invalid base model : " + posenet_config.base_model
            sys.exit()
        
        input_image_dir = os.path.dirname(input_txt_file)
        input_model_dir = os.path.dirname(input_pb_file)
        input_numpy_mean_image_file = os.path.join(input_model_dir, "mean_image.npy")
        if use_fixed_input_mean_std:
            input_numpy_mean_image = None
        else:
            input_numpy_mean_image = np.load(input_numpy_mean_image_file)
        input_numpy_mean_beacon_file = os.path.join(input_model_dir, "mean_beacon.npy")
        if use_fixed_input_mean_std:
            input_numpy_mean_beacon = None
        else:
            input_numpy_mean_beacon = np.load(input_numpy_mean_beacon_file)
        output_localize_txt_file = os.path.join(result_log_dir, "localize-poses.txt")
        output_localize_json_file = os.path.join(result_log_dir, "localize-poses.json")
        output_summary_log_file = os.path.join(result_log_dir, "summary-log.txt")
        output_hist_log_file = os.path.join(result_log_dir, "hist-log.txt")
        output_detail_log_file = os.path.join(result_log_dir, "detail-log.txt")
        
        # parse beacon setting file
        beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
        beacon_num = len(beaconmap.keys())
        
	image = tf.placeholder(tf.float32, [1, 224, 224, 3])
	beacons = tf.placeholder(tf.float32, [1, beacon_num, 1, 1])
	datasource, image_filenames = get_data(input_txt_file, input_image_dir, input_numpy_mean_image, beaconmap, input_numpy_mean_beacon, use_fixed_input_mean_std)
        predicted_poses = np.zeros((len(datasource.images),7))
        groundtruth_poses = np.zeros((len(datasource.images),7))
	results = np.zeros((len(datasource.images),2))
        
	# Set GPU options
	gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
        
        time_array = np.array([])
        
        # Load model
        graph = load_graph(input_pb_file)
        input_image_name = "import/" + input_image_layer_name        
        input_beacon_name = "import/" + input_beacon_layer_name
        output_pos_name = "import/" + output_pos_layer_name
        output_rot_name = "import/" + output_rot_layer_name
        input_image_operation = graph.get_operation_by_name(input_image_name)
        input_beacon_operation = graph.get_operation_by_name(input_beacon_name)
        output_pos_operation = graph.get_operation_by_name(output_pos_name)
        output_rot_operation = graph.get_operation_by_name(output_rot_name)
        
	with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options), graph=graph) as sess:
		for i in range(len(datasource.images)):
			np_image = datasource.images[i]
			np_beacon = datasource.beacons[i]
			feed = {input_image_operation.outputs[0]: np_image, input_beacon_operation.outputs[0]: np_beacon}
                        
			pose_q= np.asarray(datasource.poses[i][3:7])
			pose_x= np.asarray(datasource.poses[i][0:3])
                        start_time = time.time()
			predicted_x, predicted_q = sess.run([output_pos_operation.outputs[0],
                                                             output_rot_operation.outputs[0]], feed_dict=feed)
                        elapsed_time = time.time() - start_time
                        time_array = np.append(time_array, elapsed_time)

			pose_q = np.squeeze(pose_q)
			pose_x = np.squeeze(pose_x)
			predicted_q = np.squeeze(predicted_q)
			predicted_x = np.squeeze(predicted_x)
                        
                        predicted_poses[i,0] = predicted_x[0]
                        predicted_poses[i,1] = predicted_x[1]
                        predicted_poses[i,2] = predicted_x[2]
                        predicted_poses[i,3] = predicted_q[0]
                        predicted_poses[i,4] = predicted_q[1]
                        predicted_poses[i,5] = predicted_q[2]
                        predicted_poses[i,6] = predicted_q[3]
                        
                        groundtruth_poses[i,0] = pose_x[0]
                        groundtruth_poses[i,1] = pose_x[1]
                        groundtruth_poses[i,2] = pose_x[2]
                        groundtruth_poses[i,3] = pose_q[0]
                        groundtruth_poses[i,4] = pose_q[1]
                        groundtruth_poses[i,5] = pose_q[2]
                        groundtruth_poses[i,6] = pose_q[3]
                        
			# calculate error
			q1 = pose_q / np.linalg.norm(pose_q)
			q2 = predicted_q / np.linalg.norm(predicted_q)
			d = abs(np.sum(np.multiply(q1,q2)))
                        # fix floating point inaccuracy
                        if d<-1.0:
                                d = -1.0
                        if d>1.0:
                                d = 1.0
			theta = 2 * np.arccos(d) * 180/math.pi
			error_x = np.linalg.norm(pose_x-predicted_x)
			results[i,:] = [error_x,theta]
                        print 'Index=', i, ' , Pos Error(m)=', error_x, ',  Rot Error(degrees)=', theta
        
        # write localize poses
        with open(output_localize_txt_file, "w") as fw:
                fw.write("Localization Data V1\n")
                fw.write("ImageFile, Camera Position [X Y Z W P Q R]\n")
                fw.write("\n")
                for idx, filename in enumerate(image_filenames):
                        fw.write(os.path.basename(filename) + " " + ' '.join(['%f' % p for p in predicted_poses[idx,:]]) + "\n")
        locJsonObj = {}
        locJsonObj["locGlobal"] = []
        for idx, pose in enumerate(predicted_poses):
                Rh = transformations.quaternion_matrix([pose[3], pose[4], pose[5], pose[6]])
                groundtruth_pose = groundtruth_poses[idx]
                groundtruth_Rh = transformations.quaternion_matrix([groundtruth_pose[3], groundtruth_pose[4], groundtruth_pose[5], groundtruth_pose[6]])
                
                jsonLoc = {}
                jsonLoc["filename"] = os.path.basename(image_filenames[idx])
                jsonLoc["t"] = [pose[0], pose[1], pose[2]]
                jsonLoc["R"] = Rh[0:3,0:3].tolist()
                jsonLoc["groundtruth"] = [groundtruth_pose[0], groundtruth_pose[1], groundtruth_pose[2]]
                jsonLoc["groundtruthR"] = groundtruth_Rh[0:3,0:3].tolist()
                locJsonObj["locGlobal"].append(jsonLoc)
        with open(output_localize_json_file, "w") as fw:
                json.dump(locJsonObj, fw)
        
        # write histgram results
        bin_edge = [0.01*float(x) for x in range(0,1001)]
        dist_errors = results[:,0]
        dist_hist, dist_hist_bins = np.histogram(dist_errors, bins=bin_edge)
        dist_hist_cum_ratio = np.cumsum(dist_hist) / float(len(datasource.images))
        print "Histogram of error: " + str(dist_hist)
        print "Cumulative ratio of error: " + str(dist_hist_cum_ratio)
        print "Total loc err larger than " + str(np.max(bin_edge)) + " meters: " + str(len(datasource.images)-np.sum(dist_hist))

        # write summary of results
	mean_result = np.mean(results,axis=0)
	std_result = np.std(results,axis=0)
	median_result = np.median(results,axis=0)
	max_result = np.max(results,axis=0)        
        percentile_80_result = np.percentile(results,80,axis=0)
        percentile_90_result = np.percentile(results,90,axis=0)
        percentile_95_result = np.percentile(results,95,axis=0)
	print 'Mean error ', mean_result[0], 'm  and ', mean_result[1], 'degrees.'
	print 'StdDev error ', std_result[0], 'm  and ', std_result[1], 'degrees.'
	print 'Median error ', median_result[0], 'm  and ', median_result[1], 'degrees.'
	print 'Max error ', max_result[0], 'm  and ', max_result[1], 'degrees.'        
	print '80 percentile error ', percentile_80_result[0], 'm  and ', percentile_80_result[1], 'degrees.'
	print '90 percentile error ', percentile_90_result[0], 'm  and ', percentile_90_result[1], 'degrees.'
	print '95 percentile error ', percentile_95_result[0], 'm  and ', percentile_95_result[1], 'degrees.'
	print 'Mean time ', str(np.average(time_array))
	print 'StdDev time ', str(np.std(time_array))
	print 'Median time ', str(np.median(time_array))
        with open(output_summary_log_file, "w") as fw:
                fw.write("Number of test image = " + str(len(datasource.images)) + "\n")
                fw.write("Mean error = " + str(mean_result[0]) + " meters." + "\n")
                fw.write("StdDev error = " + str(std_result[0]) + " meters." + "\n")
                fw.write("Median error = " + str(median_result[0]) + " meters." + "\n")
                fw.write("Max error = " + str(max_result[0]) + " meters." + "\n")
	        fw.write("80 percentile error = " + str(percentile_80_result[0]) + " meters." + "\n")
	        fw.write("90 percentile error = " + str(percentile_90_result[0]) + " meters." + "\n")
	        fw.write("95 percentile error = " + str(percentile_95_result[0]) + " meters." + "\n")
                fw.write("\n")
                fw.write("Mean error = " + str(mean_result[1]) + " degrees." + "\n")
                fw.write("StdDev error = " + str(std_result[1]) + " degrees." + "\n")
                fw.write("Median error = " + str(median_result[1]) + " degrees." + "\n")
                fw.write("Max error = " + str(max_result[1]) + " degrees." + "\n")
	        fw.write("80 percentile error = " + str(percentile_80_result[1]) + " degrees." + "\n")
                fw.write("90 percentile error = " + str(percentile_90_result[1]) + " degrees." + "\n")
                fw.write("95 percentile error = " + str(percentile_95_result[1]) + " degrees." + "\n")
                fw.write("\n")
                fw.write("Histogram of error: " + str(dist_hist) + "\n")
                fw.write("Cumulative ratio: " + str(np.around(np.cumsum(dist_hist,dtype=float)/len(datasource.images),2)) + "\n")
                fw.write("Total loc err larger than " + str(np.max(bin_edge)) + " meters: " + str(len(datasource.images)-np.sum(dist_hist)) + "\n")
                fw.write("\n")
                fw.write("Mean time = " + str(np.average(time_array)) + "\n")
                fw.write("StdDev time = " + str(np.std(time_array)) + "\n")
                fw.write("Median time = " + str(np.median(time_array)) + "\n")
        # write error histgram
        np.savetxt(output_hist_log_file, zip(dist_hist_bins, dist_hist_cum_ratio), delimiter=',')
        # write error histgram
        np.savetxt(output_hist_log_file, zip(dist_hist_bins, dist_hist_cum_ratio), delimiter=',')

        # write detail results
        with open(output_detail_log_file, "w") as fw:
                for idx, filename in enumerate(image_filenames):
                        fw.write(os.path.basename(filename) + "," + str(results[idx,0]) + "," + str(results[idx,1]) + "\n")
Example n. 16
0
def main():
    """Train the beacon-only posenet LSTM model.

    Parses CLI arguments, loads beacon training data (optionally
    augmented), saves the mean beacon vector, builds the shrink LSTM
    model from pretrained beacon weights, and trains it with
    homoscedastic-uncertainty losses, checkpointing weights each epoch
    and logging to TensorBoard.
    """
    # NOTE(review): description says "testing" but this script trains the
    # LSTM model — looks like a copy-paste leftover; confirm before changing.
    description = 'This script is for testing posenet'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_txt_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path of input txt file in Cambridge Visual Landmark Dataset format.')
    parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where beacon setting file is saved.')
    parser.add_argument('output_model_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where output models will be saved.')
    parser.add_argument('output_log_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where log files will be saved.')
    parser.add_argument('-w', '--input_beacon_weight_file', action='store', type=str, default=None, \
                        help='File path beacon posenet weiht is stored in numpy format.')
    parser.add_argument('-s', '--use_shrink_model', action='store_true', default=False, \
                        help='Use shrink model (default: False)')
    parser.add_argument('-f', '--use_fixed_input_mean_std', action='store_true', default=False, \
                        help='Use fixed input mean and std (default: False)')
    parser.add_argument('-a', '--use_augmentation_beacon', action='store_true', default=False, \
                        help='Use data augmentation for beacon data (default: False)')
    parser.add_argument('-e', '--epochs', action='store', type=int, default=posenet_config.epochs, \
                        help='Epochs (Default : ' + str(posenet_config.epochs))
    parser.add_argument('-b', '--batch_size', action='store', type=int, default=posenet_config.lstm_batch_size, \
                        help='Batch size (Default : ' + str(posenet_config.lstm_batch_size))
    args = parser.parse_args()
    input_txt_file = args.input_txt_file
    input_beacon_setting_file = args.input_beacon_setting_file
    output_model_dir = args.output_model_dir
    output_log_dir = args.output_log_dir
    input_beacon_weight_file = args.input_beacon_weight_file
    use_shrink_model = args.use_shrink_model
    use_fixed_input_mean_std = args.use_fixed_input_mean_std
    use_augmentation_beacon = args.use_augmentation_beacon
    # CLI overrides are written back into the shared config module so the
    # rest of the pipeline sees the same epoch/batch settings.
    posenet_config.epochs = args.epochs
    posenet_config.lstm_batch_size = args.batch_size
    print "epochs : " + str(posenet_config.epochs)
    print "batch size : " + str(posenet_config.lstm_batch_size)
    print "use shrink model for training : " + str(use_shrink_model)
    print "use fixed input mean and std : " + str(use_fixed_input_mean_std)
    print "use beacon data augmentation : " + str(use_augmentation_beacon)
    # Pretrained beacon weights are mandatory for building the LSTM model.
    if input_beacon_weight_file is None:
        print "please specify one of initial weight or restore directory"
        sys.exit()

    # parse beacon setting file
    beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
    beacon_num = len(beaconmap.keys())

    output_numpy_mean_beacon_file = os.path.join(output_model_dir,
                                                 "mean_beacon.npy")
    output_numpy_model_file = os.path.join(output_model_dir, "model.npy")
    output_model_file = os.path.join(output_model_dir, "model.ckpt")

    datasource, mean_beacon = posenet_data_utils.get_beacon_data(
        input_txt_file, beaconmap, beacon_num, use_fixed_input_mean_std,
        use_augmentation_beacon)
    # Persist the mean beacon so inference can apply the same normalization
    # (skipped when fixed mean/std normalization is used instead).
    if use_fixed_input_mean_std:
        print("Skip save mean beacon")
    else:
        with open(output_numpy_mean_beacon_file, 'wb') as fw:
            np.save(fw, mean_beacon)
        print("Save mean beacon at: " + output_numpy_mean_beacon_file)

    # Set GPU options
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    KTF.set_session(session)

    # Train model
    # s_x / s_q are the learnable homoscedastic-uncertainty log-variances
    # for position and rotation losses (Kendall et al. style weighting).
    s_x = K.variable(value=0.0)
    s_q = K.variable(value=-3.0)
    if use_shrink_model:
        model = posenet_beacon_no_inception_shrink_lstm_keras.create_posenet(
            beacon_num, input_beacon_weight_file)
    else:
        print "Do not shrink model is not supported"
        sys.exit()
    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=0.00000001)
    euc_loss3x_s = posenet_homoscedastic_loss.euc_loss3x_s(s_x=s_x)
    euc_loss3q_s = posenet_homoscedastic_loss.euc_loss3q_s(s_q=s_q)
    model.compile(optimizer=adam,
                  loss={
                      'beacon_lstm_pose_xyz': euc_loss3x_s,
                      'beacon_lstm_pose_wpqr': euc_loss3q_s
                  })
    model.summary()

    # Setup checkpointing: save weights after every epoch
    checkpointer = ModelCheckpoint(filepath=os.path.join(
        output_model_dir, "checkpoint_weights.h5"),
                                   verbose=1,
                                   save_weights_only=True,
                                   period=1)

    # Save Tensorboard log
    logger = TensorBoard(log_dir=output_log_dir,
                         histogram_freq=0,
                         write_graph=True)

    # Adjust Epoch size depending on beacon data augmentation
    # (augmentation multiplies samples per epoch, so epochs are divided;
    # integer division under Python 2)
    if use_augmentation_beacon:
        posenet_config.epochs = posenet_config.epochs / posenet_config.num_beacon_augmentation
    steps_per_epoch = int(
        len(datasource.poses_index) / float(posenet_config.lstm_batch_size))
    num_iterations = steps_per_epoch * posenet_config.epochs
    print("Number of epochs : " + str(posenet_config.epochs))
    print("Number of training data : " + str(len(datasource.poses_index)))
    print("Number of iterations : " + str(num_iterations))

    history = model.fit_generator(
        posenet_data_utils.gen_beacon_lstm_data_batch(datasource),
        steps_per_epoch=steps_per_epoch,
        epochs=posenet_config.epochs,
        callbacks=[checkpointer, logger])

    model.save_weights(os.path.join(output_model_dir, "trained_weights.h5"))
Example n. 17
0
def main():
    """Reconstruct 3D models from multiple videos and merge them into one model.

    Per-video pipeline (OpenMVG command-line tools driven via os.system):
      1.   image listing (openMVG_main_SfMInit_ImageListing)
      1.1  intrinsic check / fixup of sfm_data.json when no intrinsics exist
      2.   feature extraction + matching (external project binary), with
           iBeacon data exported alongside for later merge acceleration
      3.   global SfM reconstruction (openMVG_main_GlobalSfM), retried up to
           reconstructParam.rerunRecon times
      4.   pointcloud coloring
      4.5  repeated "jump" cutting + bundle adjustment of the cut model
      5.   cleaning of sfm_data (removing unused views)
      6.   unused frames are re-queued as new pseudo-videos for reconstruction
    After all videos are processed, the per-video models are merged
    (optionally accelerated with a bag-of-words model) and the largest
    merged model is saved.

    Command-line arguments:
      project_path            -- directory containing an "Input" folder with
                                 one sub-folder per video
      -k / --path-camera-file -- optional 3x3 camera matrix in NumPy text
                                 format; its [0][0] entry overrides the
                                 default focal length
      --bow                   -- train/use BOW to accelerate 3D model merge
    """
    # set parameter
    # NOTE(review): these look like class references used as parameter
    # namespaces (attributes accessed directly, no instantiation) — confirm
    # against the ReconstructParam module.
    reconstructParam = ReconstructParam.ReconstructParam
    reconstructIBeaconParam = ReconstructIBeaconParam.ReconstructIBeaconParam
    reconstructBOWParam = ReconstructBOWParam.ReconstructBOWParam

    # parse parameters
    description = 'This script is for reconstruct 3D models from multiple videos and merge to one 3D model. ' + \
                'iBeacon is used for accelerating 3D model merge. ' + \
                'Please prepare multiple videos in Input folder.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('project_path', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where your 3D model project is stored.')
    parser.add_argument('-k', '--path-camera-file', action='store', nargs='?', const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where camera matrix is stored in Numpy text format. (default: focal length ' + \
                            str(reconstructParam.focalLength) + ' will be used)')
    parser.add_argument('--bow', action='store_true', default=False, \
                        help='Use BOW to accelerate 3D model merge if this flag is set (default: False)')
    args = parser.parse_args()
    PROJECT_PATH = args.project_path
    USE_BOW = args.bow
    PATH_CAMERA_FILE = args.path_camera_file

    # If a camera matrix file was supplied, validate it is 3x3 and take its
    # [0][0] entry (fx) as the focal length for reconstruction.
    if PATH_CAMERA_FILE:
        if os.path.exists(PATH_CAMERA_FILE):
            with open(PATH_CAMERA_FILE, "r") as camMatFile:
                K = np.loadtxt(camMatFile)
            if K.shape[0] != 3 or K.shape[1] != 3:
                print "Error : invalid camera matrix size = " + str(K)
                sys.exit()
            print "Focal length " + str(K[0][0]) + " is set for reconstruction"
            reconstructParam.focalLength = K[0][0]
        else:
            print "Error : invalid camera matrix file = " + PATH_CAMERA_FILE
            sys.exit()

    # get paths
    inputPath = os.path.join(PROJECT_PATH, "Input")
    outputPath = os.path.join(PROJECT_PATH, "Output")

    FileUtils.makedir(outputPath)

    # reconstruct all videos
    # NOTE: listVideo is appended to inside the loop (step 6) so that newly
    # extracted "unused frame" folders are also reconstructed; Python's for
    # loop over a list picks up items appended during iteration, so this acts
    # as a work queue.
    listVideo = sorted(os.listdir(inputPath))
    for video in listVideo:
        # skip stray files inside Input/ (only directories are videos)
        if not os.path.isdir(os.path.join(inputPath, video)):
            continue

        print "Begin reconstructing video : " + video

        # Per-video directory layout:
        #   Output/<video>/matches            feature/match files
        #   Output/<video>/SfM/reconstruction/global   final sfm_data.json
        sfm_mainDir = os.path.join(outputPath, video)
        sfm_inputDir = os.path.join(inputPath, video)
        sfm_inputImgDir = os.path.join(sfm_inputDir, "inputImg")
        sfm_matchesDir = os.path.join(sfm_mainDir, "matches")
        sfm_sfmDir = os.path.join(sfm_mainDir, "SfM")
        sfm_reconstructDir = os.path.join(sfm_sfmDir, "reconstruction")
        sfm_globalDir = os.path.join(sfm_reconstructDir, "global")

        FileUtils.makedir(sfm_mainDir)
        FileUtils.makedir(sfm_inputImgDir)
        FileUtils.makedir(sfm_matchesDir)
        FileUtils.makedir(sfm_sfmDir)
        FileUtils.makedir(sfm_reconstructDir)
        FileUtils.makedir(sfm_globalDir)

        # Only run the full pipeline if this video has not been reconstructed
        # yet (sfm_data.json in the global dir is the completion marker).
        if not os.path.isfile(os.path.join(sfm_globalDir, "sfm_data.json")):
            # list images
            os.system("openMVG_main_SfMInit_ImageListing -i " +
                      sfm_inputImgDir + " -o " + sfm_matchesDir + " -d " +
                      reconstructParam.CAMERA_DATABASE_PATH)

            # 1.1 Check intrinsic
            # ( if camera parameter not specified then replace with fixed camera.
            # and set appropriate width and height)
            with open(os.path.join(sfm_matchesDir,
                                   "sfm_data.json")) as sfm_data_file:
                sfm_data = json.load(sfm_data_file)
                # image size is taken from the first view; assumes all frames
                # of one video share the same resolution — TODO confirm
                hImg = sfm_data["views"][0]['value']['ptr_wrapper']['data'][
                    "height"]
                wImg = sfm_data["views"][0]['value']['ptr_wrapper']['data'][
                    "width"]
                if len(sfm_data["intrinsics"]) == 0:
                    # No intrinsics detected: point every view at a single
                    # shared pinhole intrinsic (key 0) built below.
                    for view in sfm_data["views"]:
                        view["value"]["ptr_wrapper"]["data"][
                            "id_intrinsic"] = 0

                    sfm_data["intrinsics"].append({})
                    sfm_data["intrinsics"][0]["key"] = 0
                    sfm_data["intrinsics"][0]["values"] = {}
                    # sfm_data["intrinsics"][0]["values"]["polymorphic_name"] = "pinhole_radial_k3"
                    sfm_data["intrinsics"][0]["values"][
                        "polymorphic_name"] = "pinhole"
                    # NOTE(review): 2147999999 / 2147483660 appear to be magic
                    # ids expected by OpenMVG's cereal serialization — confirm
                    # against the OpenMVG version in use before changing.
                    sfm_data["intrinsics"][0]["values"][
                        "polymorphic_id"] = 2147999999
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"][
                        "id"] = 2147483660
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"][
                        "data"] = {}
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "width"] = wImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "height"] = hImg
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "focal_length"] = reconstructParam.focalLength
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "disto_k3"] = [0, 0, 0]
                    # Python 2 integer division: principal point is the image
                    # center truncated to whole pixels.
                    sfm_data["intrinsics"][0]["values"]["ptr_wrapper"]["data"][
                        "principal_point"] = [wImg / 2, hImg / 2]

            # write the (possibly patched) sfm_data.json back in place
            with open(os.path.join(sfm_matchesDir, "sfm_data.json"),
                      "w") as sfm_data_file:
                json.dump(sfm_data, sfm_data_file)

            # 2 - Features computation and matching
            # ( Compute per image a list of features & descriptors )
            # Export per-frame iBeacon signal data next to the matches; used
            # later to accelerate model merging.
            IBeaconUtils.exportBeaconDataForSfmImageFrames(
                os.path.join(sfm_inputDir, "csv"),
                os.path.join(sfm_matchesDir, "sfm_data.json"),
                os.path.join(inputPath, "listbeacon.txt"),
                os.path.join(sfm_matchesDir, "beacon.txt"),
                reconstructIBeaconParam.normApproach)

            os.system(reconstructParam.EXTRACT_FEATURE_MATCH_PROJECT_PATH + \
                      " " + sfm_matchesDir + \
                      " -mf=" + str(reconstructParam.maxTrackletMatchDistance) + \
                      " -mm=" + str(reconstructParam.minMatchToRetain) + \
                      " -f=" + str(reconstructParam.extFeatDistRatio) + \
                      " -r=" + str(reconstructParam.extFeatRansacRound))

            # OpenMVG assumes matches.e.txt for global reconstruction, matches.f.txt for incremental reconstruction
            os.system("cp " + os.path.join(sfm_matchesDir, "matches.f.txt") +
                      " " + os.path.join(sfm_matchesDir, "matches.e.txt"))

            # 3 - Global reconstruction
            # GlobalSfM can fail nondeterministically; retry until the output
            # file appears or rerunRecon attempts are exhausted.
            countRecon = 1
            while not os.path.isfile(
                    os.path.join(sfm_globalDir, "sfm_data.json")
            ) and countRecon < reconstructParam.rerunRecon:
                os.system("openMVG_main_GlobalSfM -i " +
                          os.path.join(sfm_matchesDir, "sfm_data.json") +
                          " -m " + sfm_matchesDir + " -o " + sfm_globalDir)
                # for OpenMVG 1.0
                #os.system("openMVG_main_ConvertSfM_DataFormat -i " + os.path.join(sfm_globalDir, "sfm_data.bin") + " -o " + os.path.join(sfm_globalDir, "sfm_data.json"))
                countRecon = countRecon + 1
                time.sleep(1)

            # give up on this video if reconstruction never succeeded
            if not os.path.isfile(os.path.join(sfm_globalDir,
                                               "sfm_data.json")):
                continue

            # 4 - Color the pointcloud
            os.system("openMVG_main_ComputeSfM_DataColor -i " +
                      os.path.join(sfm_globalDir, "sfm_data.json") + " -o " +
                      os.path.join(sfm_globalDir, "colorized.ply"))

            # 4.5 remove part of reconstruction where it is incorrect
            # Specifically, sometimes when their matching is not adequate,
            # the reconstructed model will be divided into two or more models
            # with different scale and a "jump" between pose translation.
            # This function detects such jump and retain the the largest
            # beginning or ending part of reconstruction, while the rest
            # should be reconstructed separately by cleanSfM.
            countCut = 0
            # keep cutting until no more cut
            while cleanSfM.cutSfMDataJump(
                    os.path.join(sfm_globalDir, "sfm_data.json"),
                    bufferFrame=reconstructParam.bufferFrame):
                countCut = countCut + 1
                # keep each pre-cut snapshot as sfm_data_BC<n>.json for debugging
                os.rename(
                    os.path.join(sfm_globalDir, "sfm_data_BC.json"),
                    os.path.join(sfm_globalDir,
                                 "sfm_data_BC" + str(countCut) + ".json"))
                # modified by T. Ishihara 2016.06.14
                # change bundle adjustment parameter for cutted model
                '''                
                os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " -c=" + "rs,rst,rsti")
                '''
                os.system(reconstructParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " " + os.path.join(sfm_globalDir, "sfm_data.json") + \
                          " -c=" + "rst,rsti" + " -r=" + "1")
            # re-color the pointcloud after cutting ("AC" = after cut)
            os.system("openMVG_main_ComputeSfM_DataColor -i " +
                      os.path.join(sfm_globalDir, "sfm_data.json") + " -o " +
                      os.path.join(sfm_globalDir, "colorized_AC.ply"))

            # 5 - Clean sfm_data by removing viewID of frames that are not used
            # in reconstruction and put them in another folder and reconstruct them again
            # note that sfm_data.json in matches folder is renamed and kept as reference
            unusedImg = cleanSfM.cleanSfM(
                os.path.join(sfm_globalDir, "sfm_data.json"), [
                    os.path.join(sfm_matchesDir, "matches.putative.txt"),
                    os.path.join(sfm_matchesDir, "matches.e.txt"),
                    os.path.join(sfm_matchesDir, "matches.f.txt")
                ])

            # 6. move unused images, csv files into a new folder unless they have less than x images
            # unusedImg appears to hold two lists: [0] frames cut from the
            # front of the video, [1] frames cut from the back — TODO confirm
            # against cleanSfM.cleanSfM.
            for i in range(0, len(unusedImg)):
                listUnused = unusedImg[i]
                # too few frames to be worth reconstructing separately
                if len(listUnused) < reconstructParam.minUnusedImgLength:
                    continue

                # set name for new video
                if i == 0:
                    newVidName = video + "_front"
                elif i == 1:
                    newVidName = video + "_back"
                else:
                    # this should not be called
                    continue

                # set path
                pathNewVid = os.path.join(inputPath, newVidName)

                # skip if there is already this folder
                if os.path.isdir(pathNewVid):
                    continue

                print "Extract unused part of " + video + " into " + newVidName

                FileUtils.makedir(pathNewVid)

                csvNewVid = os.path.join(pathNewVid, "csv")
                imgNewVid = os.path.join(pathNewVid, "inputImg")
                FileUtils.makedir(csvNewVid)
                FileUtils.makedir(imgNewVid)

                # copy image in list and csv file
                # "cp -s" creates symlinks instead of copying file contents
                os.system("cp -s " +
                          os.path.join(sfm_inputDir, "csv", "*.csv") + " " +
                          csvNewVid)
                for unusedFilename in listUnused:
                    os.system("cp -s " +
                              os.path.join(sfm_inputImgDir, unusedFilename) +
                              " " + imgNewVid)

                # append the folder into reconstruction queue
                listVideo.append(newVidName)

    # train bag of words model, and extract bag of words feature for all images
    # (skipped when BOWfile.yml already exists from a previous run)
    if USE_BOW and not os.path.isfile(
            os.path.join(outputPath, "merge_result", "Output", "matches",
                         "BOWfile.yml")):
        outputBowPath = os.path.join(outputPath, "merge_result", "Output",
                                     "matches")
        if not os.path.isdir(outputBowPath):
            FileUtils.makedir(outputBowPath)
        print "Execute Training BOW : " + reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml")
        os.system(reconstructParam.WORKSPACE_DIR + "/TrainBoW/Release/TrainBoW " + outputPath + " " + \
                  os.path.join(outputBowPath, "BOWfile.yml") + " -p=" + os.path.join(outputBowPath, "PCAfile.yml"))

    # load graph structure from "mergeGraph.txt" if it exists
    # create new graph structure if it does not exist
    if os.path.isfile(
            os.path.join(outputPath, "merge_result", "Output", "SfM",
                         "reconstruction", "mergeGraph.txt")):
        sfmGraph = sfmMergeGraphIBeacon.sfmGraphIBeacon.load(
            os.path.join(outputPath, "merge_result", "Output", "SfM",
                         "reconstruction", "mergeGraph.txt"))
        # workspace path is machine-specific, so refresh it after loading
        sfmGraph.workspacePath = reconstructParam.WORKSPACE_DIR

        #### start of manually adding new model code ####
        # In current code, you cannot add new 3D model once you start merging.
        # Enable following commented code to add new 3D model after you already started merging.
        '''
        newModelToAdd = []
        for newModelName in newModelToAdd:
            addModel(newModelName,os.path.join(inputPath,newModelName),os.path.join(outputPath,newModelName))
        sfmGraph.clearBadMatches()
        '''
        ### end of manually adding new model code ###
    else:
        sfmGraph = sfmMergeGraphIBeacon.sfmGraphIBeacon(
            inputPath,
            outputPath,
            os.path.join(outputPath, "merge_result", "Input"),
            os.path.join(outputPath, "merge_result", "Output", "SfM",
                         "reconstruction"),
            os.path.join(outputPath, "merge_result", "Output", "matches"),
            os.path.join(outputPath, "merge_result", "Input", "csv"),
            os.path.join(outputPath, "merge_result", "Input", "inputImg"),
            reconstructParam.WORKSPACE_DIR,
            USE_BOW,
            validMergeRansacThresK=reconstructParam.vldMergeAgrFrameThresK,
            minReconFrame=reconstructParam.minReconFrameToAdd)

    # Merge all per-video models into one; image_describer.txt from the first
    # video is reused so all merged models share the same feature settings.
    sfmGraph.mergeModel(os.path.join(inputPath, "listbeacon.txt"),
                        os.path.join(outputPath, listVideo[0], "matches",
                                     "image_describer.txt"),
                        inputPath,
                        outputPath,
                        reconParam=reconstructParam,
                        reconIBeaconParam=reconstructIBeaconParam,
                        reconBOWParam=reconstructBOWParam)

    # select largest 3D model and save it
    SfMDataUtils.saveFinalSfM(PROJECT_PATH)