filtsize, lossWeights, DDx_new, loss_function, regFactor))
    mkdir(calibOutputDir)
    calibOutputPath = join(calibOutputDir, 'outputFilters.rawImage')

    if use_tfrecords:
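        # convert the raw databases to TFRecord files stored in a 'tfrecords' subdirectory of each database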
        trainFilePaths = recordhandler.ConvertDatabaseToTFRecords(
            trainPath, join(trainPath, 'tfrecords'), maxExamples=maxNExamples)
        validFilePaths = recordhandler.ConvertDatabaseToTFRecords(
            validPath, join(validPath, 'tfrecords'), maxExamples=maxNExamples)
    else:
        # get train database
        myCreator = DatasetCreator(trainPath,
                                   NCube=NCube,
                                   NDD=NDD,
                                   maxNExamples=maxNExamples)
        myCreator.cropDDWidth(DDx_new)
        train_database = myCreator.getDataset()

        # get validation database
        myCreator = DatasetCreator(validPath,
                                   NCube=NCube,
                                   NDD=NDD,
                                   maxNExamples=maxNExamples)
        myCreator.cropDDWidth(DDx_new)
        valid_database = myCreator.getDataset()

        # both datasets must contain a whole number of batches
        assert train_database['Cubes'].shape[0] % batchSize == 0
        assert valid_database['Cubes'].shape[0] % batchSize == 0

    NDD[1] = DDx_new  # directly use DDx_new instead of the original DD width, which is too big
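    # learning-rate schedule: 0.01 for the first 100 entries, then 0.001 for the next 100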
    mylearningrate = [0.01] * 100 + [0.001] * 100
Example #2
def calibEstimatorSanityTest2_createData():
    logfiledir = _LOG_FILE_DIR
    validDir = join(logfiledir, 'Valid')
    trainDir = join(logfiledir, 'Train')

    # define sizes for tests:
    sysPaths = SystemSettings.getSystemPaths('Server')
    sysDims = SystemSettings.getSystemDimensions()
    NCube = sysDims.NCube  # Cube [y, x, lambda] image size
    NDD = list(sysDims.NDD)  # DD [y,x] image size
    NFilt = sysDims.NFilt  # number of coefficients to be estimated for each lambda filter
    DDx_new = sysDims.DDx_new  # the width of the DD data influenced by a filter of size 300
    NChannels = NCube[2]

    numTrainExamples = 1000
    numValidExamples = 200

    # cubes are stacked along the first axis: each example contributes NCube[0] rows
    NCube_train = (numTrainExamples * NCube[0], 1, NCube[1], NCube[2])
    NCube_valid = (numValidExamples * NCube[0], 1, NCube[1], NCube[2])

    # Cube_train = np.random.standard_normal(NCube_train).astype(dtype=np.float32)
    # Cube_valid = np.random.standard_normal(NCube_valid).astype(dtype=np.float32)

    dataCreator = DatasetCreator(directory=sysPaths.trainPath,
                                 NCube=NCube,
                                 NDD=NDD)
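    # keep only the DDx_new columns of the DD images that are influenced by the filters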
    dataCreator.cropDDWidth(DDx_crop=DDx_new)
    train_dataset = dataCreator.getDataset()
    Cube_train = train_dataset['Cubes']
    # Cube_std = np.std(Cube_train)
    # Cube_train = Cube_train / Cube_std
    del train_dataset

    # print('calibEstimatorSanityTest2_createData: Cube: std: {}, mean: {}, min: {}, max: {}'.format(
    #     np.std(Cube_train), np.mean(Cube_train), np.min(Cube_train), np.max(Cube_train)
    # ))

    dataCreator = DatasetCreator(directory=sysPaths.validPath,
                                 NCube=NCube,
                                 NDD=NDD)
    dataCreator.cropDDWidth(DDx_crop=DDx_new)
    valid_dataset = dataCreator.getDataset()
    Cube_valid = valid_dataset['Cubes']
    # Cube_valid = Cube_valid / Cube_std
    del valid_dataset

    Filts_GT = np.squeeze(imhand.readImage(_FILTERS_GT_PATH))
    # crop Filts_GT to NFilt coefficients per filter, keeping 31 filter rows
    crop_remove_size = int((Filts_GT.shape[1] - NFilt) / 2)
    Filts_GT = Filts_GT[1:32, crop_remove_size:crop_remove_size + NFilt]
    # Filts_GT = np.random.normal(loc=0.0, scale=1.0, size=(31, 301)).astype(dtype=np.float32)

    print('calibEstimatorSanityTest2_createData: Filters size: ({}x{})'.format(
        Filts_GT.shape[0], Filts_GT.shape[1]))

    NDD[1] = DDx_new  # directly use DDx_new instead of the original size which is too big

    DD_train = np.zeros((NCube_train[0], 1, NDD[1], 1), np.float32)
    DD_valid = np.zeros((NCube_valid[0], 1, NDD[1], 1), np.float32)

    # create the DD (Y) images by running the calibration estimator in eval mode
    # with the ground-truth filters (a0=Filts_GT); the zero arrays above are the
    # Yeval placeholders filled by eval() below:
    cEst = CalibEstimator(NX=NCube,
                          NY=NDD,
                          L=NChannels,
                          NFilt=NFilt,
                          learningRate=0.01,
                          batchSize=128,
                          a0=Filts_GT)
    cEst.setModeEval()
    cEst.createNPArrayDatasets()
    cEst.buildModel()

    DD_train = cEst.eval(Xeval=Cube_train, Yeval=DD_train)
    DD_valid = cEst.eval(Xeval=Cube_valid, Yeval=DD_valid)

    cEst.resetModel()

    # save results:
    # filters:
    filters_str = join(logfiledir, 'filters_GT.rawImage')
    imhand.writeImage(Filts_GT, filters_str)

    # save training data: each example spans NCube[0] rows of the stacked arrays
    for ii in range(numTrainExamples):
        cube_str = join(trainDir, 'Img_{}_Cube.rawImage'.format(ii))
        DD_str = join(trainDir, 'Img_{}_DD.rawImage'.format(ii))
        imhand.writeImage(
            np.squeeze(Cube_train[ii * NCube[0]:(ii + 1) * NCube[0], :, :, :]),
            cube_str)
        imhand.writeImage(
            np.squeeze(DD_train[ii * NCube[0]:(ii + 1) * NCube[0], :]), DD_str)

    # save validation data:
    for ii in range(numValidExamples):
        cube_str = join(validDir, 'Img_{}_Cube.rawImage'.format(ii))
        DD_str = join(validDir, 'Img_{}_DD.rawImage'.format(ii))
        imhand.writeImage(
            np.squeeze(Cube_valid[ii * NCube[0]:(ii + 1) * NCube[0], :, :, :]),
            cube_str)
        imhand.writeImage(
            np.squeeze(DD_valid[ii * NCube[0]:(ii + 1) * NCube[0], :]), DD_str)
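
# A minimal read-back sketch (hypothetical helper, not part of the original test).
# It assumes the module-level names used above (imhand, np, join, _LOG_FILE_DIR) are in scope.
def _inspectGeneratedPair(ii=0):
    # read back one cube/DD pair written by calibEstimatorSanityTest2_createData
    trainDir = join(_LOG_FILE_DIR, 'Train')
    cube = np.squeeze(imhand.readImage(join(trainDir, 'Img_{}_Cube.rawImage'.format(ii))))
    dd = np.squeeze(imhand.readImage(join(trainDir, 'Img_{}_DD.rawImage'.format(ii))))
    # shapes should reflect NCube[0] rows per example and the cropped DD width DDx_new
    print('example {}: cube shape {}, DD shape {}'.format(ii, cube.shape, dd.shape))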