Example #1
import os

# sample_process and store are helper modules from the surrounding project
def step(taskName, parameterByName, folderStore, options):
    # Get parameters
    trainingSize = parameterByName.get("training size")
    testSize = parameterByName.get("test size")
    positiveFraction = parameterByName.get("positive fraction")
    # Get names
    windowNames = parameterByName.get("window names", [])
    windowPaths = map(folderStore.getWindowPath, windowNames)
    windowFolderPaths = map(os.path.dirname, windowPaths)
    windowInformations = map(folderStore.getWindowInformation, windowNames)
    patchNames = parameterByName.get("patch names", [])
    patchPaths = map(folderStore.getPatchPath, patchNames)
    patchFolderPaths = map(os.path.dirname, patchPaths)
    patchInformations = map(folderStore.getPatchInformation, patchNames)
    # Set
    sourceInformations = windowInformations + patchInformations
    sourceFolderPaths = windowFolderPaths + patchFolderPaths
    # Make sure that each dataset has the same windowGeoLength
    windowLengthInMeters = store.validateSame(
        [x.getWindowLengthInMeters() for x in sourceInformations],
        "Datasets must have the same window length in meters: %s" % taskName,
    )
    # Make sure that each dataset has the same spatialReference
    spatialReference = store.validateSame(
        [x.getSpatialReference() for x in sourceInformations],
        "Datasets must have the same spatial reference: %s" % taskName,
    )
    # Set
    targetDatasetPath = folderStore.fillDatasetPath(taskName)
    targetDatasetFolderPath = os.path.dirname(targetDatasetPath)
    # Record
    information = {
        "parameters": {"training size": trainingSize, "test size": testSize, "positive fraction": positiveFraction},
        "windows": {"spatial reference": spatialReference, "window length in meters": windowLengthInMeters},
        "sources": {
            "window names": store.stringifyList(windowNames),
            "window paths": store.stringifyList(windowPaths),
            "patch names": store.stringifyList(patchNames),
            "patch paths": store.stringifyList(patchPaths),
        },
    }
    # Combine training and test sets
    if not options.is_test:
        print "Combining datasets...\n\ttargetDatasetPath = %s" % targetDatasetPath
        information["training set"] = sample_process.combineDatasets(
            sample_process.makeTrainingPath(targetDatasetFolderPath),
            map(sample_process.makeTrainingPath, sourceFolderPaths),
            trainingSize,
            positiveFraction,
        ).getStatistics()
        information["test set"] = sample_process.combineDatasets(
            sample_process.makeTestPath(targetDatasetFolderPath),
            map(sample_process.makeTestPath, sourceFolderPaths),
            testSize,
            positiveFraction,
        ).getStatistics()
    # Save
    store.saveInformation(targetDatasetPath, information)
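
The store.validateSame calls above assert that every source dataset shares the same window length and spatial reference before the training and test sets are combined. Below is a minimal sketch of that kind of check, assuming the helper simply compares the values and raises on a mismatch; it is a stand-in, not the project's actual implementation.

def validateSame(values, errorMessage):
    # Collapse the list to its distinct values; exactly one value must remain
    uniqueValues = set(values)
    if len(uniqueValues) != 1:
        raise ValueError(errorMessage)
    return uniqueValues.pop()

# For example, every source dataset must report the same window length:
# windowLengthInMeters = validateSame([30, 30, 30], 'Datasets must have the same window length in meters: demo')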

Example #2

def step(taskName, parameterByName, folderStore, options):
    # Get parameters
    datasetName = parameterByName['dataset name']
    datasetPath = folderStore.getDatasetPath(datasetName)
    datasetFolderPath = os.path.dirname(datasetPath)
    # Set
    trainingPath = sample_process.makeTrainingPath(datasetFolderPath)
    testPath = sample_process.makeTestPath(datasetFolderPath)
    datasetInformation = dataset_store.Information(trainingPath)
    if not options.is_test:
        # Make sure that training and test sets are not empty
        if not datasetInformation.getTrainingCount():
            raise script_process.ScriptError('Empty training set: %s' % trainingPath)
        if not datasetInformation.getTestCount():
            raise script_process.ScriptError('Empty test set: %s' % testPath)
    # Pack
    classifierModule = store.getLibraryModule(parameterByName['classifier module name'])
    featureModule = store.getLibraryModule(parameterByName['feature module name'])
    featureClass = getattr(featureModule, parameterByName['feature class name'])
    # Run
    targetClassifierPath = folderStore.fillClassifierPath(taskName)
    if not options.is_test:
        # Build classifier
        resultByName = classifier.build(targetClassifierPath, classifierModule, featureClass(), trainingPath, testPath, parameterByName)
    else:
        resultByName = {}
    # Record
    makeDictionary = lambda keys: dict((key, parameterByName[key]) for key in keys if key in parameterByName)
    parameterInformation = makeDictionary(['classifier module name', 'feature module name', 'feature class name'])
    parameterInformation.update(makeDictionary(classifierModule.relevantParameterNames))
    store.saveInformation(targetClassifierPath, {
        'parameters': parameterInformation,
        'dataset': {
            'name': datasetName,
            'path': datasetPath,
        },
        'windows': {
            'training': trainingPath,
            'test': testPath,
            'window length in meters': datasetInformation.getWindowLengthInMeters(),
            'spatial reference': datasetInformation.getSpatialReference(),
        },
        'performance': resultByName,
    })
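
The classifier step resolves its classifier module and feature class at runtime from string parameters, via store.getLibraryModule and getattr. Here is a minimal sketch of the same dynamic-lookup pattern using only the standard library; the module and class names in the comments are placeholders, and importlib stands in for the project's store helper.

import importlib

def loadFeatureClass(moduleName, className):
    # Import the module by its dotted name, then pull the named class off it
    featureModule = importlib.import_module(moduleName)
    return getattr(featureModule, className)

# e.g. featureClass = loadFeatureClass('libraries.intensity', 'MeanIntensity')
# feature = featureClass()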

Example #3

def sampleWindows(targetWindowPath, region, location, parameterByName, options=None):
    # Note: regionInformation, folderStore, taskName, regionName and regionPath
    # used below come from the enclosing scope in the original project code;
    # they are not parameters of this function.
    # Get parameters
    exampleCountPerRegion = parameterByName['example count per region']
    multispectralPixelShiftValue = parameterByName['multispectral pixel shift value']
    shiftCount = parameterByName['shift count']
    # Prepare regionFrames
    regionDataset = region.getDataset()
    # regionDataset = region_store.load(region.path)
    regionFrames = regionDataset.getRegionFrames()
    regionFrameCount = len(regionFrames)
    # Prepare counts
    testRegionDataset = region.getTestDataset()
    # testRegionDataset = region_store.load(regionInformation.getTestRegionPath())
    testFractionPerRegion = regionInformation.getTestFractionPerRegion()
    # Load imageDataset
    imagePath = folderStore.getImagePath(regionInformation.getImageName())
    imageInformation = image_store.Information(imagePath)
    multispectralImage = image_store.load(imageInformation.getMultispectralImagePath())
    panchromaticImage = image_store.load(imageInformation.getPanchromaticImagePath())
    # Load locations
    positiveGeoLocations, spatialReference = point_store.load(imageInformation.getPositiveLocationPath())
    # Convert
    windowLengthInMeters = regionInformation.getWindowLengthInMeters()
    windowPixelDimensions = multispectralImage.convertGeoDimensionsToPixelDimensions(windowLengthInMeters, windowLengthInMeters)
    positivePixels = multispectralImage.convertGeoLocationsToPixelLocations(positiveGeoLocations)
    # Place examples
    exampleMachine = region_process.ExampleMachine(positivePixels, exampleCountPerRegion, testFractionPerRegion, testRegionDataset, windowPixelDimensions, multispectralPixelShiftValue, shiftCount)
    examplePacks = []
    if options and not options.is_test:
        print 'Placing examples in %s regions for "%s"...' % (regionFrameCount, taskName)
        for regionFrame in regionFrames:
            examplePacks.append(exampleMachine.placeInFrame(regionFrame))
            exampleCount = len(examplePacks)
            if exampleCount % 10 == 0:
                view.printPercentUpdate(exampleCount, regionFrameCount)
        view.printPercentFinal(regionFrameCount)
    exampleInformation = {}
    trainingPaths = []
    testPaths = []
    # Set
    targetWindowFolderPath = os.path.dirname(targetWindowPath)
    if options and not options.is_test:
        # For each exampleName,
        for exampleName in examplePacks[0].keys():
            # Convert
            examplePixelLocations = sum((x[exampleName] for x in examplePacks), [])
            exampleGeoLocations = multispectralImage.convertPixelLocationsToGeoLocations(examplePixelLocations)
            examplePath = os.path.join(targetWindowFolderPath, exampleName)
            exampleLabel = 1 if 'positive' in exampleName else 0
            # Save
            point_store.save(examplePath, exampleGeoLocations, spatialReference)
            exampleInformation[exampleName + ' count'] = len(examplePixelLocations)
            # Extract
            print 'Extracting %s windows for %s...' % (len(examplePixelLocations), exampleName)
            window_process.extract(examplePath, exampleGeoLocations, exampleLabel, windowLengthInMeters, multispectralImage, panchromaticImage)
            (testPaths if 'test' in exampleName else trainingPaths).append(examplePath)
    # Record
    information = {
        'windows': {
            'window length in meters': windowLengthInMeters,
            'spatial reference': spatialReference,
        },
        'regions': {
            'name': regionName,
            'path': regionPath,
            'count': regionFrameCount,
        },
        'examples': exampleInformation,
    }
    # Combine
    if options and not options.is_test:
        information['training set'] = sample_process.combineDatasets(sample_process.makeTrainingPath(targetWindowFolderPath), trainingPaths).getStatistics()
        information['test set'] = sample_process.combineDatasets(sample_process.makeTestPath(targetWindowFolderPath), testPaths).getStatistics()
    # Save information
    store.saveInformation(targetWindowPath, information)
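
The loop over examplePacks[0].keys() above merges the per-frame packs: for each example name, the pixel-location lists produced in every region frame are concatenated into one list before conversion and extraction. A small self-contained illustration of that merge, using made-up pack data:

examplePacks = [
    {'positive training': [(10, 12)], 'positive test': [(3, 4)]},
    {'positive training': [(55, 60), (70, 81)], 'positive test': []},
]
for exampleName in examplePacks[0].keys():
    # Concatenate this example's pixel locations across all region frames
    examplePixelLocations = sum((x[exampleName] for x in examplePacks), [])
    print exampleName, len(examplePixelLocations)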