# --- Model hyper-parameters pulled from the experiment config ---
# (`config` is presumably a ThesisConfig instance loaded earlier in the
#  file — not visible in this chunk; verify against the loading code.)
postConstraintsLayerSize = config.constraintParms['postConstraintsLayerSize']
defaultLossFunc = config.modelParms['defaultLossFunction']
lossRotScale = config.modelParms['lossRotScale']
optimizerType = config.modelParms['optimizerType']
model_yaml_filename = config.modelParms['modelYamlFilename']

# Callbacks
checkpoint_filename = config.checkpointParms['filename']
history_filename    = config.trainingParms['histFilename']


# Get Files

# Resolve output locations for the serialized model, the training
# checkpoints, and the training-history file.  getOutputFiles(..., True)
# presumably (re)creates the output entries — TODO confirm against
# ThesisConfig.
modelYamlsDict = config.trainPaths['model']
modelYamls = config.getOutputFiles(modelYamlsDict, True)
saveModelPath = config.getFolderRef(modelYamls)
model_filepath = os.path.join(saveModelPath, model_yaml_filename)

checkpointFilesDict = config.trainPaths['checkpoint']
checkpointFiles = config.getOutputFiles(checkpointFilesDict, True)
saveCheckpointPath = config.getFolderRef(checkpointFiles)
checkpoint_filepath = os.path.join(saveCheckpointPath, checkpoint_filename)

historyFilesDict = config.trainPaths['history']
historyFiles = config.getOutputFiles(historyFilesDict, True)
saveHistoryPath = config.getFolderRef(historyFiles)
history_filepath = os.path.join(saveHistoryPath, history_filename)

# Get Callbacks List
# Builds the training callbacks (checkpointing + history logging,
# presumably, given the two filepaths passed in — confirm against
# getCallbacksList in training_helpers).
callbacksList = getCallbacksList(config, history_filepath, checkpoint_filepath)
Exemple #2
0
        # Body of a per-(camera, sequence) loop; the enclosing loop
        # headers are outside this chunk.  Converts original KITTI
        # frames into rectified "standard" grayscale images using the
        # per-sequence calibration — presumably; confirm with getMaps.
        print('%sReading Camera %s, Sequence %02d' % (' ' * 2, cam, seq))

        # Skip the sequence when its calibration file is missing.
        calFile = config.getInputFiles(calFilesDict, seq)
        if (calFile is None):
            continue

        # Skip when there are no source images for this camera/sequence.
        origImages = config.getInputFiles(origImageFilesDict, seq, cam)
        if (origImages is None):
            continue

        # Output file list; None means nothing to (re)compute — skip.
        standardImages = config.getOutputFiles(standardImageFilesDict,
                                               recalcStandardImages, seq, cam)
        if (standardImages is None):
            continue

        standardImageFolder = config.getFolderRef(standardImages)

        # get cal matrix for sequence
        cameraCalMat = getCalMat(calFile, cam)
        # Per-pixel remap tables built from the calibration matrix and
        # target shape — presumably undistortion/rectification maps for
        # cv.remap; TODO confirm against getMaps.
        map_x, map_y = getMaps(destImageShape, standardDict['overlap'],
                               cameraCalMat)

        numImages = len(origImages)
        for idx, origImageName in enumerate(origImages):

            # Progress report every 300 images.
            if showProgress:
                percentComplete = int(idx / numImages * 100)
                if divmod(idx, 300)[1] == 0:
                    print('Percent Complete: %d%%' % percentComplete)

            # Read the source frame and convert to grayscale.
            # (Chunk truncated here; the remap/write step presumably
            # follows outside this view.)
            srcImage = cv.cvtColor(cv.imread(origImageName), cv.COLOR_BGR2GRAY)
Exemple #3
0
def loadPredictions(tests, eType):
    errors_list = []
    preds_list = []
    truth_list = []
    filetruthrots_list = []

    for i, test in enumerate(tests):
        # if True:
        #     test = tests[0]
        configFile = 'exp_configs/%s.yaml' % test
        config = ThesisConfig(configFile)

        # Parameters
        name = config.expName
        numOutputs = config.modelParms['numOutputs']

        # Get Files
        evalFilesDict = config.resultPaths['evaluations']
        figFilesDict = config.resultPaths['figures']
        truthFilesDict = config.kittiPrepared['truth']

        figFolder = config.getFolderRef(
            config.getOutputFiles(figFilesDict, True))

        # Get Predictions Save File
        evalFolder = ''
        for pathSection in evalFilesDict['dir']:
            evalFolder = os.path.join(evalFolder, pathSection)
        predictionsFile = os.path.join(evalFolder, eType + '_predictions.hdf5')

        if os.path.exists(predictionsFile):

            # Load Predictions
            y_pred_real, y_true_real, evalType, min_val_epoch, min_val_loss = loadPredFile(
                predictionsFile)

            print('Min Validation Loss: %s, Epoch %s' %
                  (min_val_loss, min_val_epoch))

            # get test idxs
            # numTested = y_true_real.shape[0]
            splitFilesDict = config.kittiPrepared['split']
            splitFile = config.getInputFiles(splitFilesDict)
            with h5py.File(splitFile, 'r') as f:
                if evalType == 'test':
                    turnIdxs = np.array(f['testTurnIdxs'])
                    nonTurnIdxs = np.array(f['testNonTurnIdxs'])
                elif evalType == 'val':
                    turnIdxs = np.array(f['valTurnIdxs'])
                    nonTurnIdxs = np.array(f['valNonTurnIdxs'])
            idxs = np.sort(np.concatenate(
                (turnIdxs, nonTurnIdxs)))  #[:numTested]

            truthData = np.empty((0, 3))
            for seq in config.usedSeqs:
                truthFile = config.getInputFiles(truthFilesDict, seq)
                with h5py.File(truthFile, 'r') as f:
                    rot_xyz = np.array(f['rot_xyz'])
                truthData = np.append(truthData, rot_xyz, axis=0)
            file_truth_rots = truthData[idxs, :]

            # Calculate average loss in each direction
            errors = y_true_real - y_pred_real

            errors_list.append(errors)
            preds_list.append(y_pred_real)
            truth_list.append(y_true_real)
            filetruthrots_list.append(file_truth_rots)

        else:
            print('predictions file %s does not exist' % predictionsFile)

    return (errors_list, preds_list, truth_list, filetruthrots_list)
Exemple #4
0
# Downsample the rectified "standard" images to destImageShape and
# min-max normalize pixel values into normRange as float32.
print('v Downsampling Images and Normalizing Pixels')

for cam in usedCams:
    for seqStr in kittiSeqs:
        seq = int(seqStr)
        print('%sReading Camera %s, Sequence %02d' % (' ' * 2, cam, seq))

        # Skip camera/sequence pairs with no standard images.
        standardImages = config.getInputFiles(standardImageFilesDict, seq, cam)
        if (standardImages is None):
            continue

        # Output list; None means nothing needs (re)computing — skip.
        normImages = config.getOutputFiles(normImageFilesDict, recalcNormImages, seq, cam)
        if (normImages is None):
            continue

        normImageFolder = config.getFolderRef(normImages)

        numImages = len(standardImages)
        for idx, standardImageName in enumerate(standardImages):

            # Progress report every 300 images.
            if showProgress:
                percentComplete = int(idx/numImages*100)
                if divmod(idx,300)[1]==0:
                    print('Percent Complete: %d%%' % percentComplete)

            # Grayscale read, then area-interpolated resize; cv.resize
            # takes dsize as (width, height), hence the reversed shape.
            srcImage = cv.cvtColor(cv.imread(standardImageName), cv.COLOR_BGR2GRAY)
            dstImage = cv.resize(srcImage, dsize=destImageShape[::-1], interpolation=cv.INTER_AREA)

            # Min-max normalize the resized frame into normRange as
            # float32.  (Chunk truncated: the save step presumably
            # follows outside this view.)
            srcImage = dstImage
            dstImage = cv.normalize(srcImage, None, normRange[0], normRange[1], cv.NORM_MINMAX, dtype=cv.CV_32F)
Exemple #5
0

# For each experiment config, locate its saved predictions file and,
# if present, load the stored predictions from HDF5.
for configFile in configFiles:
    config = ThesisConfig(configFile)



    # Parameters
    name = config.expName
    numOutputs      = config.modelParms['numOutputs']

    # Get Files
    evalFilesDict = config.resultPaths['evaluations']
    figFilesDict = config.resultPaths['figures']

    # getOutputFiles(..., True) presumably creates the figures folder —
    # TODO confirm against ThesisConfig.
    figFolder = config.getFolderRef(config.getOutputFiles(figFilesDict, True))




    # Get Predictions Save File
    # Join the configured directory components into a single path.
    evalFolder = ''
    for pathSection in evalFilesDict['dir']:
        evalFolder = os.path.join(evalFolder, pathSection)
    predictionsFile = os.path.join(evalFolder, evalType+'_predictions.hdf5')


    if os.path.exists(predictionsFile):

        # Load Predictions
        # (Chunk truncated: the body of this `with` is outside this view.)
        with h5py.File(predictionsFile, 'r') as f:
Exemple #6
0
from src.helpers.helper_functions import getOptimizer
from src.helpers.training_helpers import getCallbacksList, getGenerator, getTrainAndValGenerators
from src.helpers.custom_loss import scaledMSE_RT

import tensorflow as tf
from keras import backend as K
import keras.losses

# Load Configuration
configFile = 'exp_configs/scale_test_4.yaml'
config = ThesisConfig(configFile)

# Resolve the checkpoint filepath template for this experiment.
checkpoint_filename = config.checkpointParms['filename']
checkpointFilesDict = config.trainPaths['checkpoint']
checkpointFiles = config.getOutputFiles(checkpointFilesDict, True)
saveCheckpointPath = config.getFolderRef(checkpointFiles)
checkpoint_filepath = os.path.join(saveCheckpointPath, checkpoint_filename)

# Choose the loss function.  With more than 3 outputs the rotation-
# scaled MSE is used (presumably translation+rotation outputs — TODO
# confirm); it is attached to keras.losses so load_model can resolve
# the custom loss by name when deserializing the checkpoint.
numOutputs = config.modelParms['numOutputs']
lossRotScale = config.modelParms['lossRotScale']
defaultLossFunc = config.modelParms['defaultLossFunction']
if numOutputs > 3:
    lossFunc = scaledMSE_RT(lossRotScale)
    keras.losses.lossFunction = lossFunc
else:
    lossFunc = defaultLossFunc

# Load the epoch-1 checkpoint (the filename template presumably
# contains an '{epoch}' placeholder) and print the architecture.
modelPath = checkpoint_filepath.format(epoch=1)
model = load_model(modelPath)

model.summary()
Exemple #7
0
# test = tests[0]
# if True:
#
# For each experiment, register its custom loss and iterate over every
# saved checkpoint (one per epoch, given totalEpochs below).
for test in tests:
    configFile = os.path.join('exp_configs', test + '.yaml')
    config = ThesisConfig(configFile)

    numOutputs = config.modelParms['numOutputs']
    lossRotScale = config.modelParms['lossRotScale']
    defaultLossFunc = config.modelParms['defaultLossFunction']

    # All checkpoint files for this experiment; a single path comes
    # back as a plain str, so wrap it for uniform iteration.
    checkpointFilesDict = config.trainPaths['checkpoint']
    checkpointFiles = config.getInputFiles(checkpointFilesDict)
    if isinstance(checkpointFiles, str):
        checkpointFiles = np.array([checkpointFiles])
    checkpointFolder = config.getFolderRef(checkpointFiles)

    # Loss Function
    # Register the custom rotation-scaled loss so load_model can
    # deserialize checkpoints that reference it — presumably; confirm
    # against how the checkpoints were saved.
    if numOutputs > 3:
        lossFunc = scaledMSE_RT(lossRotScale)
        keras.losses.lossFunction = lossFunc
    else:
        lossFunc = defaultLossFunc

    learning_rates = []
    epochs = []
    # FOR EACH MODEL
    totalEpochs = len(checkpointFiles)
    for modelFile in checkpointFiles:
        # Reset the Keras/TF session per checkpoint to avoid graph and
        # memory growth.  (Chunk truncated: the per-checkpoint work
        # continues outside this view.)
        t = time.time()
        K.clear_session()
Exemple #8
0
#                   'exp_configs/CNN_test_12.yaml',
#                   'exp_configs/CNN_test_13.yaml',
#                   'exp_configs/CNN_test_14.yaml',
#                   'exp_configs/CNN_test_15.yaml',
#                   'exp_configs/CNN_test_16.yaml',
#                   'exp_configs/CNN_test_17.yaml']

configFileList = ['exp_configs/scale_test_3.yaml']

# For each experiment, read its training-history HDF5 file and build a
# loss figure — presumably; the plotting code is outside this chunk.
for configFile in configFileList:
    config = ThesisConfig(configFile)

    # History file written during training.
    history_filename = config.trainingParms['histFilename']
    historyFilesDict = config.trainPaths['history']
    historyFiles = config.getInputFiles(historyFilesDict)
    saveHistoryPath = config.getFolderRef(historyFiles)
    history_filepath = os.path.join(saveHistoryPath, history_filename)
    # Destination figure path:
    # '<figures folder>/<experiment name><figure extension>'.
    saveFigFilesDict = config.resultPaths['figures']
    figureFile = config.getOutputFiles(saveFigFilesDict, True)
    saveFigFolder = config.getFolderRef(figureFile)
    saveFigFilename = config.experiment['experiment'][
        'name'] + config.resultPaths['figures']['type']
    saveFigFile = os.path.join(saveFigFolder, saveFigFilename)

    if os.path.exists(history_filepath):
        print()
        with h5py.File(history_filepath, 'r') as f:
            # NOTE(review): np.int was removed in NumPy 1.24 — this
            # line needs plain int or np.int64 on modern NumPy.
            epochs = np.array(f['epochs'], dtype=np.int)
            numEpochs = len(epochs)
            # (Chunk truncated: further metric loading/plotting follows
            # outside this view.)
            if 'loss' in f:
                train_loss = np.array(f['loss'])
Exemple #9
0
                           shuffle=False)

    # LOAD MODEL

    # Best (minimum validation-loss) epoch for this experiment.
    min_val_loss, min_val_epoch = getBestValLoss(config)
    print('Min Validation Loss: %s, Epoch %s' % (min_val_loss, min_val_epoch))

    # Loss Function
    # Register the custom rotation-scaled loss so load_model can
    # deserialize checkpoints that reference it — presumably; confirm
    # against how the checkpoints were saved.
    if numOutputs > 3:
        lossFunc = scaledMSE_RT(lossRotScale)
        keras.losses.lossFunction = lossFunc
    else:
        lossFunc = defaultLossFunc

    # Checkpoint of the best epoch, named 'epoch_%03d<extension>'.
    checkpointFiles = config.getInputFiles(checkpointFilesDict)
    checkpointFolder = config.getFolderRef(checkpointFiles)
    checkpointFile = 'epoch_%03d%s' % (min_val_epoch,
                                       checkpointFilesDict['type'])
    modelFile = os.path.join(checkpointFolder, checkpointFile)
    model = load_model(modelFile)

    # PREDICT
    # Accumulate normalized truth/prediction rows batch by batch from
    # the sequence generator.
    y_true_norm = np.empty((0, numOutputs))
    y_pred_norm = np.empty((0, numOutputs))
    total = len(seqGen)
    for idx, data in enumerate(seqGen):
        # Progress report every 50 batches.
        percentComplete = int(idx / total * 100)
        if divmod(idx, 50)[1] == 0:
            print('Percent Complete: %d%%' % percentComplete)

        # (Chunk truncated: the prediction on x continues outside this
        # view.)
        x, y = data