def buildPatchFromScan(
    wrongPixelCenters,
    folderStore,
    scanName,
    scanPath,
    multispectralImagePath,
    panchromaticImagePath,
    actualPixelPointMachine,
    classifierInformation,
    isTest=False,
):
    """Build a patch from misclassified pixel centers and save its information.

    wrongPixelCenters      list of pixel centers that were classified incorrectly;
                           shuffled in place before splitting
    folderStore            provides fillPatchPath() for the target location
    scanName, scanPath     identify the probability scan the centers came from
    multispectralImagePath / panchromaticImagePath
                           source imagery loaded via image_store
    actualPixelPointMachine  coordinate converter passed through to buildPatch
    classifierInformation  supplies window length and (when not isTest) the
                           training/test datasets used to derive the split ratio
    isTest                 when True, skip the train/test split and feedback
    """
    # Show feedback
    if not isTest:
        print('Saving patch...')
    # Initialize
    multispectralImage = image_store.load(multispectralImagePath)
    panchromaticImage = image_store.load(panchromaticImagePath)
    windowLengthInMeters = classifierInformation.getWindowLengthInMeters()
    # Set path
    targetPatchPath = folderStore.fillPatchPath(scanName + ' auto')
    targetPatchFolderPath = os.path.dirname(targetPatchPath)

    # Shortcut: build a patch rooted at targetPatchFolderPath (def, not a
    # lambda assignment, per PEP 8 E731)
    def buildPatchEasily(makePath, pixelCenters):
        return buildPatch(
            makePath(targetPatchFolderPath),
            pixelCenters,
            actualPixelPointMachine,
            multispectralImage,
            panchromaticImage,
            windowLengthInMeters,
        )

    # Shuffle so the train/test split below is random
    random.shuffle(wrongPixelCenters)
    # Prepare
    information = {
        'patches': {'probability name': scanName, 'probability path': scanPath},
        'windows': {
            'window length in meters': windowLengthInMeters,
            'spatial reference': multispectralImage.getSpatialReference(),
        },
    }
    if not isTest:
        # Derive the test fraction from the existing datasets only when it is
        # actually used (the isTest path previously computed and discarded it);
        # guard against an empty training dataset to avoid ZeroDivisionError
        trainingSampleCount = classifierInformation.getTrainingDataset().countSamples()
        if trainingSampleCount:
            patchTestFraction = (
                classifierInformation.getTestDataset().countSamples()
                / float(trainingSampleCount)
            )
        else:
            patchTestFraction = 0.0
        # Split
        patchTestCount = int(patchTestFraction * len(wrongPixelCenters))
        # Build
        information['training set'] = buildPatchEasily(
            sample_process.makeTrainingPath, wrongPixelCenters[patchTestCount:]
        ).getStatistics()
        information['test set'] = buildPatchEasily(
            sample_process.makeTestPath, wrongPixelCenters[:patchTestCount]
        ).getStatistics()
    # Save
    store.saveInformation(targetPatchPath, information)
def saveWeakClassifierInformation(targetPath, trainingPath, testPath, parameterByName, extraBySectionByOption):
    """Record a weak classifier's configuration and dataset sizes at targetPath.

    targetPath              where store.saveInformation writes the result
    trainingPath, testPath  sample databases counted via classifier.getSampleCount
    parameterByName         classifier parameters, stored verbatim
    extraBySectionByOption  extra sections merged in last, so they win on conflict
    """
    # Start from the core sections: which classifier module ran, its
    # parameters, and the size of each sample set
    informationBySection = {}
    informationBySection['classifierModule'] = {'moduleName': classifier.__name__}
    informationBySection['classifier'] = parameterByName
    informationBySection['trainingSet'] = {'sampleCount': classifier.getSampleCount(trainingPath)}
    informationBySection['testSet'] = {'sampleCount': classifier.getSampleCount(testPath)}
    # Caller-supplied sections extend or override the core ones
    informationBySection.update(extraBySectionByOption)
    # Persist
    store.saveInformation(targetPath, informationBySection)
def extractDataset(targetDatasetPath, paths, parameters):
    """Extract a window-sample dataset from imagery and positive locations.

    targetDatasetPath  where the extracted dataset and its information are saved
    paths              (multispectralImagePath, panchromaticImagePath,
                        positiveLocationPath)
    parameters         (windowGeoLength, negativeRatio,
                        multispectralPixelShiftValue, shiftCount)

    Returns the extracted sample dataset.
    """
    # Unpack
    multispectralImagePath, panchromaticImagePath, positiveLocationPath = paths
    windowGeoLength, negativeRatio, multispectralPixelShiftValue, shiftCount = parameters
    # Show feedback
    view.sendFeedback('Extracting dataset...\n\ttargetDatasetPath = %s' % targetDatasetPath)
    # Extract samples
    extractor = Extractor(targetDatasetPath, windowGeoLength, multispectralPixelShiftValue, shiftCount, negativeRatio)
    extractor.extractSamples(positiveLocationPath, multispectralImagePath, panchromaticImagePath)
    # Gather what we will record
    targetDataset = extractor.getSampleDatabase()
    points, spatialReference = point_store.load(positiveLocationPath)

    def describeImage(image, imagePath):
        # Summarize an image's path, pixel dimensions and georeferencing
        return {
            'path': imagePath,
            'pixel width': image.getPixelWidth(),
            'pixel height': image.getPixelHeight(),
            'geo transform': image.getGeoTransform(),
        }

    # Record
    store.saveInformation(targetDatasetPath, {
        'multispectral image': describeImage(image_store.load(multispectralImagePath), multispectralImagePath),
        'panchromatic image': describeImage(image_store.load(panchromaticImagePath), panchromaticImagePath),
        'positive location': {
            'path': positiveLocationPath,
            'location count': len(points),
            'spatial reference': spatialReference,
        },
        'windows': {
            'path': targetDatasetPath,
            'sample count': targetDataset.countSamples(),
            'positive sample count': targetDataset.countPositiveSamples(),
            'negative sample count': targetDataset.countNegativeSamples(),
        },
        'parameters': {
            'window geo length': windowGeoLength,
            'multispectral pixel shift value': multispectralPixelShiftValue,
            'shift count': shiftCount,
            'negative ratio': negativeRatio,
        },
    })
    # Return
    return targetDataset
def saveSamples(sampleDataset, sampleIDs, featureSet):
    """Serialize the given samples to Lush matrix files, reusing a cached copy.

    sampleDataset  source dataset; provides getDatasetPath()
    sampleIDs      ids of the samples to export (must be non-empty: min() is
                   used to build the target name)
    featureSet     feature extractor; its module/class identify the cache entry

    Returns targetSamplePath, the folder-less base path of the sample/label
    files.  If an identical export already exists (matching saved information),
    it is reused without rewriting.
    """
    # Initialize
    sampleCount = len(sampleIDs)
    sampleDatasetPath = sampleDataset.getDatasetPath()
    # This information acts as the cache key for an existing export
    sampleInformation = {
        'source dataset': {
            'path': sampleDatasetPath,
            'sample ids': ' '.join(str(x) for x in sampleIDs),
        },
        'feature': {
            'module name': featureSet.__module__,
            'class name': featureSet.__class__.__name__,
        }
    }
    targetSampleName = '%s-count%s-min%s' % (folder_store.getFolderName(sampleDatasetPath), sampleCount, min(sampleIDs))
    targetSamplePath = os.path.join(store.makeFolderSafely(os.path.join(store.temporaryPath, 'cnn_datasets')), targetSampleName)
    # If an identical export already exists, reuse it
    if store.loadInformation(targetSamplePath) == sampleInformation:
        print('Using existing samples...\n\ttargetSamplePath = ' + targetSamplePath)
        return targetSamplePath
    # Save
    print('Saving samples...\n\ttargetSamplePath = ' + targetSamplePath)
    sampleGenerator = makeSampleLabelGeneratorFromSampleDataset(sampleDataset, sampleIDs, featureSet)
    samplePath, labelPath = makeSampleLabelPaths(targetSamplePath)
    # with-statement guarantees both files are closed even if the generator or
    # a write raises (the original leaked both handles on any error)
    with open(samplePath, 'wt') as sampleFile, open(labelPath, 'wt') as labelFile:
        for sampleIndex, (sample, label) in enumerate(sampleGenerator):
            # If we are starting, write the matrix headers sized from the
            # first sample/label
            if sampleIndex == 0:
                sampleFile.write(makeLushMatrixHeaderFromPart(sample, sampleCount))
                labelFile.write(makeLushMatrixHeaderFromPart(label, sampleCount))
            # Write content
            sampleFile.write(makeLushMatrixContent(sample))
            labelFile.write(makeLushMatrixContent(label))
            # Progress feedback every 100 samples
            if sampleIndex % 100 == 0:
                view.printPercentUpdate(sampleIndex + 1, sampleCount)
    view.printPercentFinal(sampleCount)
    # Record the cache key only after a fully successful write
    store.saveInformation(targetSamplePath, sampleInformation)
    # Return
    return targetSamplePath