Code Example #1
def main():
    """
    Inputs: 
    None
    Outputs:
    Runs Testing code
    """
    # TODO: Make LogDir
    # TODO: Display time to end and cleanup other print statements with color
    # TODO: Make logging file a parameter

    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--ModelPath', dest='ModelPath',
                        default='/media/nitin/Research/EVDodge/CheckpointsEVDBHomographyDodgeNetLR5e-4Epoch200/199model.ckpt',
                        help='Path to load latest model from, Default:ModelPath')
    Parser.add_argument('--ReadPath', dest='ReadPath',
                        default='/media/nitin/Research/EVDodge/DatasetChethanEvents/DeblurredHomography',
                        help='Path to load images from, Default:ReadPath')
    Parser.add_argument('--WritePath', dest='WritePath',
                        default='/media/nitin/Research/EVDodge/DatasetChethanEvents/DeblurredHomographyDodgeNet',
                        help='Path to write images to, Default:WritePath')
    Parser.add_argument(
        '--GPUDevice',
        type=int,
        default=0,
        help='What GPU do you want to use? -1 for CPU, Default:0')

    Args = Parser.parse_args()
    ModelPath = Args.ModelPath
    ReadPath = Args.ReadPath
    WritePath = Args.WritePath
    GPUDevice = Args.GPUDevice

    # Set GPUDevice
    tu.SetGPU(GPUDevice)

    # Setup all needed parameters including file reading
    TrainNames, ImageSize, PatchSize, NumTrainSamples, NumImgsStack = SetupAll(
        ReadPath)

    # Define PlaceHolder variables for Input and Predicted output
    PatchPH = tf.placeholder(tf.float32,
                             shape=(1, PatchSize[0], PatchSize[1],
                                    2 * PatchSize[2]),
                             name='Input')

    TestOperation(PatchPH, PatchSize, ModelPath, ReadPath, WritePath,
                  TrainNames, NumTrainSamples)
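The excerpts in this listing omit their import headers. A minimal header that would satisfy the names used in them is sketched below; the standard-library and TensorFlow 1.x imports are certain from the tf.placeholder/tf.Session usage, while the aliases tu (GPU/parameter utilities) and stn (spatial-transformer helpers) are assumptions about EVDodgeNet's internal module layout, not verified paths.

import argparse
import glob
import os

import cv2
import numpy as np
import tensorflow as tf  # TF 1.x API: tf.placeholder, tf.Session, tf.train.Saver
from termcolor import cprint
from tqdm import tqdm

# Project-internal modules; the exact paths below are assumptions:
# import TFUtils as tu              # tu.SetGPU, tu.FindNumParams
# import SpatialTransformer as stn  # stn.solve_DLT, stn.transform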
Code Example #2
def main():
    """
    Inputs: 
    None
    Outputs:
    Runs the Training and testing code based on the Flag
    """
    # TODO: Make LogDir
    # TODO: Make logging file a parameter
    # TODO: Time to complete print

    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--BasePath', default='/home/nitin/EVDodge/DatasetsForPaper/DownfacingEventsProcessed', help='Base path of images, Default:/home/nitin/EVDodge/DatasetsForPaper/DownfacingEventsProcessed')
    Parser.add_argument('--NumEpochs', type=int, default=200, help='Number of Epochs to Train for, Default:200')
    Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize', type=int, default=256, help='Size of the MiniBatch to use, Default:256')
    Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
    Parser.add_argument('--LossFuncName', default='PhotoL1', help='Choice of Loss functions, choose from PhotoL1, PhotoChab, PhotoRobust when using TrainingType as US. Default:PhotoL1')
    Parser.add_argument('--NetworkType', default='Small', help='Choice of Network type, choose from Small, Large, Default:Small')
    Parser.add_argument('--CheckPointPath', default='../CheckpointsHomography/', help='Path to save checkpoints, Default:../CheckpointsHomography/')
    Parser.add_argument('--LogsPath', default='/media/nitin/Research/EVDodge/Logs/', help='Path to save Logs, Default:/media/nitin/Research/EVDodge/Logs/')
    Parser.add_argument('--GPUDevice', type=int, default=0, help='What GPU do you want to use? -1 for CPU, Default:0')
    Parser.add_argument('--LR', type=float, default=1e-4, help='Learning Rate, Default: 1e-4')
    Parser.add_argument('--TrainingType', default='US', help='Training Type, S: Supervised, US: Unsupervised, Default: US')
    
    Args = Parser.parse_args()
    NumEpochs = Args.NumEpochs
    BasePath = Args.BasePath
    DivTrain = float(Args.DivTrain)
    MiniBatchSize = Args.MiniBatchSize
    LoadCheckPoint = Args.LoadCheckPoint
    LossFuncName = Args.LossFuncName
    NetworkType = Args.NetworkType
    CheckPointPath = Args.CheckPointPath
    LogsPath = Args.LogsPath
    GPUDevice = Args.GPUDevice
    LearningRate = Args.LR
    TrainingType = Args.TrainingType
    
    # Set GPUDevice
    tu.SetGPU(GPUDevice)

    # Setup all needed parameters including file reading
    TrainNames, ValNames, TestNames, OptimizerParams,\
    SaveCheckPoint, ImageSize, Rho, NumTrainSamples, NumValSamples, NumTestSamples,\
    NumTestRunsPerEpoch = SetupAll(BasePath, LearningRate)

    # If CheckPointPath doesn't exist make the path
    if not os.path.isdir(CheckPointPath):
        os.makedirs(CheckPointPath)

    # Find Latest Checkpoint File
    if LoadCheckPoint==1:
        LatestFile = FindLatestModel(CheckPointPath)
    else:
        LatestFile = None
    
    # Pretty print stats
    PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, NumTestSamples, LatestFile)
        
    # Define PlaceHolder variables for Input and Predicted output
    ImgPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, ImageSize[0], ImageSize[1], 6), name='Input')

    # PH for losses
    I1PH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 260, 346, 3), name='I1')
    I2PH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 260, 346, 3), name='I2')
    MaskPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 260, 346, 3), name='Mask')
    AllPtsPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 4, 2), name='AllPts')
    LabelPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 8, 1), name='Label')  

    TrainOperation(ImgPH, I1PH, I2PH, MaskPH, AllPtsPH, LabelPH, TrainNames, TestNames, NumTrainSamples, ImageSize, Rho,
                   NumEpochs, MiniBatchSize, OptimizerParams, SaveCheckPoint, CheckPointPath, NumTestRunsPerEpoch,
                   DivTrain, LatestFile, LossFuncName, NetworkType, BasePath, LogsPath, TrainingType)
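FindLatestModel is defined elsewhere in the project. Since the training loop (Code Example #3) saves checkpoints as '<epoch>model.ckpt' and '<epoch>a<iteration>model.ckpt', a minimal sketch of what it plausibly does, assuming only such files live in CheckPointPath, is:

import glob
import os

def FindLatestModel(CheckPointPath):
    # Sketch, not the repo's actual implementation: pick the most recently
    # written checkpoint and return its base name without the '.ckpt'
    # suffix, so that CheckPointPath + LatestFile + '.ckpt' restores it.
    # A TF1 Saver materializes 'Xmodel.ckpt' as .index/.meta/.data files.
    FileNames = glob.glob(CheckPointPath + os.sep + '*.ckpt.index')
    if not FileNames:
        return None
    LatestFile = os.path.basename(max(FileNames, key=os.path.getmtime))
    return LatestFile.replace('.ckpt.index', '')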
Code Example #3
def TrainOperation(ImgPH, I1PH, I2PH, MaskPH, AllPtsPH, LabelPH, TrainNames, TestNames, NumTrainSamples, ImageSize, Rho,
                   NumEpochs, MiniBatchSize, OptimizerParams, SaveCheckPoint, CheckPointPath, NumTestRunsPerEpoch,
                   DivTrain, LatestFile, LossFuncName, NetworkType, BasePath, LogsPath, TrainingType):
    """
    Inputs: 
    ImgPH is the Input Image placeholder
    HomingVecPH is the ground truth  homing vector placeholder
    DirNames - Full path to all image files without extension
    Train/Val - Idxs of all the images to be used for training/validation (held-out testing in this case)
    Train/ValLabels - Labels corresponding to Train/Val
    NumTrain/ValSamples - length(Train/Val)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    OptimizerParams - List of all OptimizerParams: depends on Optimizer
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    NumTestRunsPerEpoch - Number of passes of Val data with MiniBatchSize 
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data
    LatestFile - Latest checkpointfile to continue training
    Outputs:
    Saves Trained network in CheckPointPath
    """
    
    # Predict output with forward pass
    if NetworkType == 'Small':
        prHVal = EVHomographyNetUnsupSmall(ImgPH, ImageSize, MiniBatchSize)
        with tf.name_scope('Loss'):
            loss, WarpI1, WarpMask, WarpI1Patch, I2Patch, _ = LossFunc(I1PH, I2PH, MaskPH, AllPtsPH, LabelPH, prHVal, MiniBatchSize, LossFuncName, TrainingType)
    elif NetworkType == 'Large':
        prHVal = EVHomographyNetUnsup(ImgPH, ImageSize, MiniBatchSize)
        with tf.name_scope('Loss'):
            loss, WarpI1, WarpMask, WarpI1Patch, I2Patch, _ = LossFunc(I1PH, I2PH, MaskPH, AllPtsPH, LabelPH, prHVal, MiniBatchSize, LossFuncName, TrainingType)
    elif NetworkType == 'SmallRobust':
        prHVal, prAlpha = EVHomographyNetUnsupSmallRobust(ImgPH, ImageSize, MiniBatchSize)
        with tf.name_scope('Loss'):
            loss, WarpI1, WarpMask, WarpI1Patch, I2Patch, Alpha = LossFunc(I1PH, I2PH, MaskPH, AllPtsPH, LabelPH, prHVal, MiniBatchSize, LossFuncName, TrainingType, a=prAlpha, ImageSize=ImageSize)
            
    with tf.name_scope('Adam'):
        Optimizer = tf.train.AdamOptimizer(learning_rate=OptimizerParams[0], beta1=OptimizerParams[1],
                                           beta2=OptimizerParams[2], epsilon=OptimizerParams[3])
        Gradients = Optimizer.compute_gradients(loss)
        OptimizerUpdate = Optimizer.apply_gradients(Gradients)
        #Optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-8).minimize(loss)
        #Optimizer = tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.9, use_nesterov=True).minimize(loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('LossEveryIter', loss)
    tf.summary.image('WarpedImg', WarpI1[:,:,:,0:3])
    tf.summary.image('I1', I1PH[:,:,:,0:3])
    tf.summary.image('I2', I2PH[:,:,:,0:3])
    tf.summary.image('Mask', MaskPH[:,:,:,0:3])
    tf.summary.image('WarpMask', WarpMask[:,:,:,0:3])
    # tf.summary.image('WarpI1Patch', WarpI1Patch[:,:,:,0:3])
    # tf.summary.image('I2Patch', I2Patch[:,:,:,0:3])
    # tf.summary.image('I2Patch - WarpI1Patch', tf.abs(I2Patch[:,:,:,0:3] - WarpI1Patch[:,:,:,0:3]))
    tf.summary.histogram('prHVal', prHVal, collections=None, family=None)
    if(NetworkType == 'SmallRobust'):
        tf.summary.image('Alpha1', Alpha[:,:,:,0:1])
        tf.summary.image('Alpha2', Alpha[:,:,:,1:2])
        tf.summary.image('Alpha3', Alpha[:,:,:,2:3])
        tf.summary.histogram('Alpha1', Alpha[:,:,:,0:1], collections=None, family=None)
        tf.summary.histogram('Alpha2', Alpha[:,:,:,1:2], collections=None, family=None)
        tf.summary.histogram('Alpha3', Alpha[:,:,:,2:3], collections=None, family=None)
    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()
    
    AllEpochLoss = [0.0]
    EachIterLoss = [0.0]
    # Setup Saver
    Saver = tf.train.Saver()
    
    with tf.Session() as sess:       
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile + '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Print Number of parameters in the network    
        tu.FindNumParams(1)
        
        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())
            
        TotalTimeElapsed = 0.0
        TimerOverall = tic()
        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            EpochLoss = 0.0
            Timer1 = tic()
            NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                print('Epoch ' + str(Epochs) + ' PerEpochCounter ' + str(PerEpochCounter))
                Timer2 = tic()

                IBatch, LabelBatch, I1Batch, I2Batch, I1PatchBatch, I2PatchBatch, \
                AllPtsBatch, PerturbPtsBatch, MaskBatch = GenerateBatch(TrainNames, ImageSize, MiniBatchSize, Rho, BasePath)

                FeedDict = {ImgPH: IBatch, I1PH: I1Batch, AllPtsPH: AllPtsBatch, I2PH: I2Batch, MaskPH: MaskBatch, LabelPH: LabelBatch}
                _, LossThisBatch, Summary = sess.run([OptimizerUpdate, loss, MergedSummaryOP], feed_dict=FeedDict)
                
                # Tensorboard
                Writer.add_summary(Summary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
                # Without an explicit flush, TensorBoard doesn't update until many iterations later!
                Writer.flush()

                # Calculate and print Train accuracy (also called EpochLoss) every epoch
                EpochLoss += LossThisBatch

                # Save All losses
                EachIterLoss.append(LossThisBatch)

                TimeLastMiniBatch = toc(Timer2)

                # Print LossThisBatch
                print('LossThisBatch is ' + str(LossThisBatch))
                
                # Save checkpoint every SaveCheckPoint iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the model learnt so far in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print(SaveName + ' Model Saved...')

                # Print timing information
                EstimatedTimeToCompletionThisEpoch = float(TimeLastMiniBatch)*float(NumIterationsPerEpoch-PerEpochCounter-1.0)
                EstimatedTimeToCompletionTotal = float(TimeLastMiniBatch)*float(NumIterationsPerEpoch-PerEpochCounter-1.0) +\
                                                 float(TimeLastMiniBatch)*float(NumIterationsPerEpoch-1.0)*float(NumEpochs-Epochs)
                TotalTimeElapsed = toc(TimerOverall)
                print('Percentage complete in total epochs ' + str(float(Epochs+1)/float(NumEpochs-StartEpoch+1)*100.0))
                print('Percentage complete in this Train epoch ' + str(float(PerEpochCounter)/float(NumIterationsPerEpoch)*100.0))
                print('Last MiniBatch took '+ str(TimeLastMiniBatch) + ' secs, time taken till now ' + str(TotalTimeElapsed) + \
                      ' estimated time to completion of this epoch is ' + str(EstimatedTimeToCompletionThisEpoch))
                print('Estimated Total time remaining is ' + str(EstimatedTimeToCompletionTotal))
                
            TimeLastEpoch = toc(Timer1)
            EstimatedTimeToCompletion = float(TotalTimeElapsed)/float(Epochs+1.0)*float(NumEpochs-Epochs-1.0)
                
            # Save Each Epoch loss
            AllEpochLoss.append(EpochLoss)
            
            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print(SaveName + ' Model Saved...')
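When training resumes, the epoch number is recovered from the checkpoint base name by splitting at the literal 'a' that separates the epoch count from the iteration count in mid-epoch saves. A small worked example of that parsing line:

# '12a300model' -> split('a')[0] == '12'        -> StartEpoch 12
# '199model'    -> split('a')[0] == '199model'  -> digits only -> StartEpoch 199
for LatestFile in ('12a300model', '199model'):
    StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
    print(LatestFile, '->', StartEpoch)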
Code Example #4
def main():
    """
    Inputs: 
    None
    Outputs:
    Runs the Training and testing code based on the Flag
    """
    # TODO: Make LogDir
    # TODO: Make logging file a parameter
    # TODO: Time to complete print

    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--BasePath', default='/media/nitin/Research/EVDodge/downfacing_processed', help='Base path of images, Default:/media/nitin/Research/EVDodge/downfacing_processed')
    Parser.add_argument('--NumEpochs', type=int, default=200, help='Number of Epochs to Train for, Default:200')
    Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize', type=int, default=256, help='Size of the MiniBatch to use, Default:256')
    Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
    Parser.add_argument('--CheckPointPath', default='../CheckpointsHomography/', help='Path to save checkpoints, Default:../CheckpointsHomography/')
    Parser.add_argument('--LogsPath', default='/media/nitin/Research/EVDodge/Logs/', help='Path to save Logs, Default:/media/nitin/Research/EVDodge/Logs/')
    Parser.add_argument('--LossFuncName', default='M', help='Choice of Loss functions, choose from M for Mean, V for Variance, Default:M')
    Parser.add_argument('--GPUDevice', type=int, default=0, help='What GPU do you want to use? -1 for CPU, Default:0')
    Parser.add_argument('--LR', type=float, default=1e-4, help='Learning Rate, Default: 1e-4')
    Parser.add_argument('--SymType', default='L1', help='Similarity mapping, choose from L1 and Chab, Default:L1')
    
    Args = Parser.parse_args()
    NumEpochs = Args.NumEpochs
    BasePath = Args.BasePath
    DivTrain = float(Args.DivTrain)
    MiniBatchSize = Args.MiniBatchSize
    LoadCheckPoint = Args.LoadCheckPoint
    CheckPointPath = Args.CheckPointPath
    LogsPath = Args.LogsPath
    LossFuncName = Args.LossFuncName
    GPUDevice = Args.GPUDevice
    LearningRate = Args.LR
    SymType = Args.SymType
    
    # Set GPUDevice
    tu.SetGPU(GPUDevice)


    # Setup all needed parameters including file reading
    TrainNames, ValNames, TestNames, OptimizerParams,\
    SaveCheckPoint, ImageSize, PatchSize, NumTrainSamples, NumValSamples, NumTestSamples,\
    NumTestRunsPerEpoch = SetupAll(BasePath, LearningRate)

    # Find Latest Checkpoint File
    if LoadCheckPoint==1:
        LatestFile = FindLatestModel(CheckPointPath)
    else:
        LatestFile = None
    
    # Pretty print stats
    PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, NumTestSamples, LatestFile)
        
    # Define PlaceHolder variables for Input and Predicted output
    PatchPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, PatchSize[0], PatchSize[1], PatchSize[2]), name='Input')

    # PH for losses
    IPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, ImageSize[0], ImageSize[1], ImageSize[2]), name='IPH')
    MaskPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, ImageSize[0], ImageSize[1], ImageSize[2]), name='Mask')

    TrainOperation(PatchPH, IPH, MaskPH, TrainNames, TestNames, NumTrainSamples, ImageSize, PatchSize,
                   NumEpochs, MiniBatchSize, OptimizerParams, SaveCheckPoint, CheckPointPath, NumTestRunsPerEpoch,
                   DivTrain, LatestFile, LossFuncName, BasePath, LogsPath, SymType)
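tu.SetGPU is one of the project's utilities and is not shown in these excerpts. Given the '-1 for CPU' convention in every --GPUDevice help string, a plausible sketch (an assumption about the utility, not its verified source) is the standard CUDA_VISIBLE_DEVICES approach:

import os

def SetGPU(GPUDevice):
    # Assumed behavior of tu.SetGPU: pin TensorFlow to one GPU, or hide
    # all GPUs when GPUDevice is -1 (CPU mode). Must run before the first
    # tf.Session() creates a CUDA context.
    if GPUDevice == -1:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(GPUDevice)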
Code Example #5
def main():
    """
    Inputs: 
    None
    Outputs:
    Runs Testing code
    """
    # TODO: Make LogDir
    # TODO: Display time to end and cleanup other print statements with color
    # TODO: Make logging file a parameter

    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--ModelPath', dest='ModelPath',
                        default='/media/nitin/Research/EVDodge/CheckpointsDeblurHomographyLR1e-4Epochs400/399model.ckpt',
                        help='Path to load latest model from, Default:ModelPath')
    Parser.add_argument('--ReadPath', dest='ReadPath',
                        default='/media/nitin/Research/EVDodge/DatasetChethanEvents/Deblurred',
                        help='Path to load images from, Default:ReadPath')
    Parser.add_argument('--WritePath', dest='WritePath',
                        default='/media/nitin/Research/EVDodge/DatasetChethanEvents/DeblurredHomography',
                        help='Path to write images to, Default:WritePath')
    Parser.add_argument(
        '--GPUDevice',
        type=int,
        default=0,
        help='What GPU do you want to use? -1 for CPU, Default:0')
    Parser.add_argument(
        '--CropType',
        dest='CropType',
        default='C',
        help='What kind of crop do you want to perform? R: Random, C: Center, Default: C')

    Args = Parser.parse_args()
    ModelPath = Args.ModelPath
    ReadPath = Args.ReadPath
    WritePath = Args.WritePath
    GPUDevice = Args.GPUDevice
    CropType = Args.CropType

    # Set GPUDevice
    tu.SetGPU(GPUDevice)

    # Setup all needed parameters including file reading
    TrainNames, ImageSize, PatchSize, NumTrainSamples = SetupAll(ReadPath)

    # Define PlaceHolder variables for Input and Predicted output
    PatchPH = tf.placeholder(tf.float32,
                             shape=(1, PatchSize[0], PatchSize[1],
                                    PatchSize[2] * 2),
                             name='Input')
    I1PH = tf.placeholder(tf.float32,
                          shape=(1, PatchSize[0], PatchSize[1], PatchSize[2]),
                          name='I1')
    I2PH = tf.placeholder(tf.float32,
                          shape=(1, PatchSize[0], PatchSize[1], PatchSize[2]),
                          name='I2')

    if not os.path.exists(WritePath):
        cprint("WARNING: %s doesn't exist, creating it." % WritePath, 'yellow')
        os.makedirs(WritePath)  # makedirs also creates missing parent directories

    TestOperation(PatchPH, I1PH, I2PH, PatchSize, ModelPath, ReadPath,
                  WritePath, TrainNames, NumTrainSamples, CropType)
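Note the channel depth of PatchPH above: PatchSize[2] * 2, i.e. the two patches are fed to the network stacked along the channel axis (the 6-channel ImgPH in the training scripts follows the same pattern). A minimal sketch of assembling such an input, assuming channel-wise concatenation with I1 first (the order is an assumption, and the data here is hypothetical):

import numpy as np

# Hypothetical data for two 128x128x3 patches:
I1 = np.zeros((128, 128, 3), dtype=np.float32)
I2 = np.zeros((128, 128, 3), dtype=np.float32)
# Stack along channels, then add the batch axis -> shape (1, 128, 128, 6),
# matching PatchPH's (1, PatchSize[0], PatchSize[1], PatchSize[2] * 2).
IBatch = np.concatenate((I1, I2), axis=2)[np.newaxis, ...]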
Code Example #6
def TestOperation(PatchPH, I1PH, I2PH, PatchSize, ModelPath, ReadPath,
                  WritePath, TrainNames, NumTrainSamples, CropType):
    """
    Inputs: 
    ImgPH is the Input Image placeholder
    HomingVecPH is the ground truth  homing vector placeholder
    NumTrainSamples - length(Train)
    ModelPath - Path to load trained model from
    DataPath - Paths of all images where testing will be run on
    Outputs:
    Predictions written to ./TxtFiles/PredOut.txt
    """
    # Generate indices for a center crop of the training patch size
    CenterX = PatchSize[1] // 2
    CenterY = PatchSize[0] // 2
    # For a crop the same size as the patch these both reduce to 0
    RandX = int(np.ceil(CenterX - PatchSize[1] / 2))
    RandY = int(np.ceil(CenterY - PatchSize[0] / 2))
    p1 = (RandX, RandY)
    p2 = (RandX, RandY + PatchSize[0])
    p3 = (RandX + PatchSize[1], RandY + PatchSize[0])
    p4 = (RandX + PatchSize[1], RandY)

    AllPts = [p1, p2, p3, p4]

    # Predict output with forward pass, MiniBatchSize for Test is 1
    prHVal = EVHomographyNetUnsupSmall(PatchPH, PatchSize, 1)
    prHVal = tf.reshape(prHVal, (-1, 8, 1))

    HMat = stn.solve_DLT(1, AllPts, prHVal)

    # Warp I1 to I2
    out_size = [128, 128]
    WarpI1 = stn.transform(out_size, HMat, 1, I1PH)

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        tu.FindNumParams(1)
        PredOuts = open(WritePath + os.sep + 'PredOuts.txt', 'w')
        for dirs in tqdm(next(os.walk(ReadPath))[1]):
            CurrReadPath = ReadPath + os.sep + dirs
            NumFilesInCurrDir = len(glob.glob(CurrReadPath + os.sep + '*.png'))
            I = cv2.imread(CurrReadPath + os.sep + "event_%d" % 1 + '.png')
            CurrWritePath = WritePath + os.sep + dirs
            # Create Write Folder if doesn't exist
            if (not os.path.exists(CurrWritePath)):
                os.makedirs(CurrWritePath)

            for ImgNum in tqdm(range(0, NumFilesInCurrDir)):
                # for StackNum in range(0, NumImgsStack):
                INow = cv2.imread(CurrReadPath + os.sep + "event_%d" %
                                  (ImgNum + 1) + '.png')
                Rho = [25]  # [10, 10]
                IBatch, I1Batch, I2Batch, AllPtsBatch, PerturbPtsBatch, H4PtColBatch, MaskBatch = GenerateBatch(
                    INow, Rho, PatchSize, CropType, Vis=False)
                # TODO: Better way is to feed data into a MiniBatch and Extract it again
                # INow = np.hsplit(INow, 2)[0] # Imgs have a stack of 2 in this case, hence extract one

                FeedDict = {PatchPH: IBatch, I1PH: I1Batch, I2PH: I2Batch}
                prHPredVal = sess.run(prHVal, feed_dict=FeedDict)
                # Reshape prediction to the same (4, 2) corner layout as the label
                prHPredVal = np.float32(np.reshape(prHPredVal, (-1, 4, 2)))[0]
                prHTrue = np.float32(np.reshape(H4PtColBatch[0],
                                                (-1, 4, 2)))[0]
                # Mean Euclidean corner error over the 4 corners
                ErrorNow = np.sum(
                    np.sqrt((prHPredVal[:, 0] - prHTrue[:, 0])**2 +
                            (prHPredVal[:, 1] - prHTrue[:, 1])**2)) / 4
                # print(ErrorNow)
                # print(prHPredVal)
                # print(prHTrue)
                # a = input('a')
                # Timer1 = tic()
                # WarpI1Ret = sess.run(WarpI1, FeedDict)
                # cv2.imshow('a', WarpI1Ret[0])
                # cv2.imshow('b', I1Batch[0])
                # cv2.imshow('c', I2Batch[0])
                # cv2.imshow('d', np.abs(WarpI1Ret[0]- I2Batch[0]))
                # cv2.waitKey(0)

                # print(toc(Timer1))

                # WarpI1Ret = WarpI1Ret[0]
                # Remap to [0,255] range
                # WarpI1Ret = np.uint8(remap(WarpI1Ret, 0.0, 255.0, np.amin(WarpI1Ret), np.amax(WarpI1Ret)))
                # Crop out junk pixels as they are appended in top left corner due to padding
                # WarpI1Ret = WarpI1Ret[-PatchSize[0]:, -PatchSize[1]:, :]

                # IStacked = np.hstack((WarpI1Ret, I2Batch[0]))
                # Write Image to file
                # cv2.imwrite(CurrWritePath + os.sep + 'events' + os.sep +  "event_%d"%(ImgNum+1) + '.png', IStacked)
                PredOuts.write(dirs + os.sep + "event_%d" % ImgNum + '.png' +
                               '\t' + str(ErrorNow) + '\n')
        PredOuts.close()
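The value written per image to PredOuts.txt is the mean corner error of the predicted 4-point homography: the Euclidean distance between predicted and ground-truth corner displacements, averaged over the four patch corners. An equivalent vectorized form of the ErrorNow expression:

import numpy as np

def MeanCornerError(prHPred, prHTrue):
    # prHPred, prHTrue: (4, 2) arrays of corner displacements (x, y).
    # Same value as ErrorNow: sum of per-corner Euclidean errors / 4.
    return np.mean(np.linalg.norm(prHPred - prHTrue, axis=1))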
Code Example #7
File: TrainEVSegNet.py Project: zhjpqq/EVDodgeNet
def main():
    """
    Inputs: 
    None
    Outputs:
    Runs the Training and testing code based on the Flag
    """
    # TODO: Make LogDir
    # TODO: Make logging file a parameter
    # TODO: Time to complete print

    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument(
        '--BasePath',
        default='/media/nitin/Research/EVDodge/DatasetChethanEvents/DeblurredHomography',
        help='Base path of images, Default:/media/nitin/Research/EVDodge/DatasetChethanEvents/DeblurredHomography')
    Parser.add_argument('--NumEpochs',
                        type=int,
                        default=200,
                        help='Number of Epochs to Train for, Default:200')
    Parser.add_argument(
        '--DivTrain',
        type=int,
        default=1,
        help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize',
                        type=int,
                        default=256,
                        help='Size of the MiniBatch to use, Default:256')
    Parser.add_argument(
        '--LoadCheckPoint',
        type=int,
        default=0,
        help=
        'Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
    Parser.add_argument(
        '--GPUDevice',
        type=int,
        default=0,
        help='What GPU do you want to use? -1 for CPU, Default:0')
    Parser.add_argument(
        '--CheckPointPath',
        default='../CheckpointsSeg/',
        help='Path to save checkpoints, Default:../CheckpointsSeg/')
    Parser.add_argument(
        '--LogsPath',
        default='/media/nitin/Research/EVDodge/LogsSeg/',
        help='Path to save Logs, Default:/media/nitin/Research/EVDodge/LogsSeg/'
    )
    Parser.add_argument('--LR',
                        type=float,
                        default=1e-3,
                        help='Learning Rate, Default: 1e-3')
    Parser.add_argument(
        '--MaxFrameDiff',
        type=int,
        default=1,
        help='Maximum Frame difference to feed into network, Default: 1')

    Args = Parser.parse_args()
    NumEpochs = Args.NumEpochs
    BasePath = Args.BasePath
    DivTrain = float(Args.DivTrain)
    MiniBatchSize = Args.MiniBatchSize
    LoadCheckPoint = Args.LoadCheckPoint
    GPUDevice = Args.GPUDevice
    CheckPointPath = Args.CheckPointPath
    LogsPath = Args.LogsPath
    LearningRate = Args.LR
    MaxFrameDiff = Args.MaxFrameDiff

    # Set GPUDevice
    tu.SetGPU(GPUDevice)

    # If CheckPointPath doesn't exist make the path
    if (not (os.path.isdir(CheckPointPath))):
        os.makedirs(CheckPointPath)

    # If LogsPath doesn't exist make the path
    if (not (os.path.isdir(LogsPath))):
        os.makedirs(LogsPath)

    # Setup all needed parameters including file reading
    TrainNames, TrainLabels, OptimizerParams, SaveCheckPoint, Factor, ImageSize, NumTrainSamples = SetupAll(
        BasePath, LearningRate)

    # Find Latest Checkpoint File
    if LoadCheckPoint == 1:
        LatestFile = FindLatestModel(CheckPointPath)
    else:
        LatestFile = None

    # Pretty print stats
    PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples,
                LatestFile)

    # Define PlaceHolder variables for Input and Predicted output
    ImgPH = tf.placeholder(tf.float32,
                           shape=(MiniBatchSize, ImageSize[0], ImageSize[1],
                                  6),
                           name='Input')
    I1PH = tf.placeholder(tf.float32,
                          shape=(MiniBatchSize, ImageSize[0], ImageSize[1], 3),
                          name='I1')
    I2PH = tf.placeholder(tf.float32,
                          shape=(MiniBatchSize, ImageSize[0], ImageSize[1], 3),
                          name='I2')
    LabelPH = tf.placeholder(tf.float32,
                             shape=(MiniBatchSize, ImageSize[0], ImageSize[1],
                                    2),
                             name='Label')  # 2 classes

    TrainOperation(ImgPH, I1PH, I2PH, LabelPH, TrainNames, TrainLabels,
                   NumTrainSamples, Factor, ImageSize, NumEpochs,
                   MiniBatchSize, OptimizerParams, SaveCheckPoint,
                   CheckPointPath, DivTrain, LatestFile, MaxFrameDiff,
                   LogsPath, BasePath)
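LabelPH above carries a 2-channel map ('2 classes'), i.e. a per-pixel one-hot segmentation label. A minimal sketch of converting a binary mask into that layout (a hypothetical helper for illustration, not a function from the repo):

import numpy as np

def MaskToOneHot(Mask):
    # Mask: (H, W) array with values in {0, 1}.
    # Returns (H, W, 2) float32 one-hot labels matching LabelPH's layout.
    OneHot = np.zeros(Mask.shape + (2,), dtype=np.float32)
    OneHot[..., 0] = (Mask == 0)
    OneHot[..., 1] = (Mask == 1)
    return OneHot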