Example #1
import os
import sys
import urllib.request

# create_filename, is_file and logger are assumed to come from the
# project's helper modules.
def maybe_download_and_extract(target_directory, scene_url):
    '''
    Download the file at scene_url into target_directory. If a file with
    the same name has already been downloaded, the download is skipped.
    '''
    filename = scene_url.split('/')[-1]
    filepath = create_filename(target_directory, filename)
    if not is_file(filepath):

        def _progress(count, block_size, total_size):
            sys.stdout.write('\rDownloading %s %.1f%%' %
                             (filename, float(count * block_size) /
                              float(total_size) * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(scene_url, filepath,
                                                 _progress)
        sys.stdout.write('\n')  # end the progress line
        sys.stdout.flush()
        statinfo = os.stat(filepath)
        logger.info('Successfully downloaded: %s %s bytes' %
                    (filename, statinfo.st_size))
    else:
        logger.info('%s was already downloaded at %s' %
                    (filename, target_directory))
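
A minimal usage sketch, with an illustrative URL and target directory:

# Hedged usage sketch; the URL and directory are illustrative only.
maybe_download_and_extract('/tmp/scenes',
                           'https://example.com/data/scene_001.tar.gz')
# The first call prints download progress and logs the final size; calling
# it again with the same URL only logs that the file is already there.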
Example #2
    def intersect(self, output_directory, geometry):
        '''
        Clip this shape's features with the given geometry (expected in
        EPSG:4326) and write the intersections to a new shapefile.
        '''
        layer = self.get_layer()
        layer_name = layer.GetName()
        spatial_reference = layer.GetSpatialRef()

        # Transform the clip geometry from WGS84 into the layer's own
        # spatial reference before intersecting.
        in_spatial_reference = osr.SpatialReference()
        in_spatial_reference.ImportFromEPSG(4326)
        coordinate_transformation = osr.CoordinateTransformation(
            in_spatial_reference, spatial_reference)
        geometry.Transform(coordinate_transformation)

        in_feature = layer.GetNextFeature()
        in_layer_definition = layer.GetLayerDefn()
        create_directory_path(output_directory)
        output_name = output_directory
        if is_file(output_name):
            self.driver.DeleteDataSource(output_name)
        data_source = self.driver.CreateDataSource(output_name)
        out_layer = data_source.CreateLayer(layer_name, spatial_reference,
                                            geom_type=ogr.wkbPolygon)
        # Copy the field definitions from the input layer.
        for i in range(in_layer_definition.GetFieldCount()):
            out_layer.CreateField(in_layer_definition.GetFieldDefn(i))
        while in_feature:
            out_layer_definition = out_layer.GetLayerDefn()
            feature_geometry = in_feature.GetGeometryRef()
            out_feature = ogr.Feature(out_layer_definition)
            if geometry.Intersect(feature_geometry):
                intersection = geometry.Intersection(feature_geometry)
                out_feature.SetGeometry(intersection)
                # Copy the attribute values of the intersected feature.
                for i in range(out_layer_definition.GetFieldCount()):
                    out_feature.SetField(
                        out_layer_definition.GetFieldDefn(i).GetNameRef(),
                        in_feature.GetField(i))
                out_layer.CreateFeature(out_feature)
            out_feature = None
            in_feature = layer.GetNextFeature()
        self.close()
        return Data(output_name)
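
A hedged usage sketch for intersect: it assumes Data wraps an existing shapefile and that the clip geometry is supplied in EPSG:4326, as the coordinate transformation above implies; all file names are illustrative.

from osgeo import ogr

# Clip polygon expressed in WGS84 (EPSG:4326); intersect() reprojects it
# into the layer's spatial reference before testing each feature.
wkt = ('POLYGON ((-99.2 19.3, -99.0 19.3, -99.0 19.5, '
       '-99.2 19.5, -99.2 19.3))')
clip_geometry = ogr.CreateGeometryFromWkt(wkt)
shape = Data('states.shp')  # illustrative input shapefile
clipped = shape.intersect('/tmp/clipped.shp', clip_geometry)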
Example #3
    def split(self, output_directory, column=0):
        '''
        This method takes the input shape and iterates over its features,
        creating a new shapefile for each one of them. It copies all the
        fields and the spatial reference of the original file. The created
        files are saved in the destination directory, named after the value
        of the given field.
        '''
        layer = self.get_layer()
        layer_name = layer.GetName()
        spatial_reference = layer.GetSpatialRef()
        in_feature = layer.GetNextFeature()
        layer_definition = layer.GetLayerDefn()
        # Use the requested column to name the output files.
        field_definition = layer_definition.GetFieldDefn(column)
        column_name = field_definition.GetName()
        shape_files = []
        create_directory_path(output_directory)
        in_layer_definition = layer.GetLayerDefn()
        while in_feature:
            in_feature_name = in_feature.GetField(column_name)
            output_name = create_filename(output_directory,
                                          '%s.shp' % in_feature_name)
            shape_files.append(output_name)
            if is_file(output_name):
                self.driver.DeleteDataSource(output_name)
            data_source = self.driver.CreateDataSource(output_name)
            out_layer = data_source.CreateLayer(layer_name, spatial_reference,
                                                geom_type=ogr.wkbPolygon)
            for i in range(in_layer_definition.GetFieldCount()):
                out_layer.CreateField(in_layer_definition.GetFieldDefn(i))
            out_layer_definition = out_layer.GetLayerDefn()
            geometry = in_feature.GetGeometryRef()
            out_feature = ogr.Feature(out_layer_definition)
            out_feature.SetGeometry(geometry)
            for i in range(out_layer_definition.GetFieldCount()):
                out_feature.SetField(
                    out_layer_definition.GetFieldDefn(i).GetNameRef(),
                    in_feature.GetField(i))
            out_layer.CreateFeature(out_feature)
            out_feature = None
            in_feature = layer.GetNextFeature()
        self.close()
        return [Data(filename) for filename in shape_files]
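
A hedged usage sketch for split, with an illustrative input file:

# Split a multi-feature shapefile into one file per feature, named after
# the value of the first attribute column (column=0 by default).
shape = Data('municipalities.shp')  # illustrative input shapefile
pieces = shape.split('/tmp/split')
print('%d features written' % len(pieces))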
Example #4
def create_shape_from_json(id, json, output_directory):
    '''
    Given a json string containing coordinates, this method creates a shape file.
    '''
    create_directory_path(output_directory)
    filename = create_filename(output_directory, '%s.shp' % id)
    shape = Data(filename)
    if is_file(filename):
        shape.driver.DeleteDataSource(filename)
    data_source = shape.driver.CreateDataSource(filename)
    spatial_reference = osr.SpatialReference()
    spatial_reference.ImportFromEPSG(4326)    
    layer = data_source.CreateLayer('layer', spatial_reference,
                                    geom_type=ogr.wkbPolygon)
    layer.CreateField(ogr.FieldDefn('id', ogr.OFTString))
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetField('id', str(id))
    geometry = ogr.CreateGeometryFromJson(json)
    feature.SetGeometry(geometry)
    layer.CreateFeature(feature)
    shape.close()
    return shape
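
A hedged usage sketch; the id and the GeoJSON polygon are illustrative:

# A closed polygon ring in GeoJSON form; the id becomes both the file
# name and the value of the 'id' field.
geojson = ('{"type": "Polygon", "coordinates": '
           '[[[-99.2, 19.3], [-99.0, 19.3], [-99.0, 19.5], [-99.2, 19.3]]]}')
shape = create_shape_from_json('tile_001', geojson, '/tmp/shapes')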
Example #5
    def handle(self, **options):

        mapgrid = '1449619'

        acq = get_pair_quality(mapgrid)

        for image in acq:
            print(image.pk_id)

        id = options["id"][0]
        image_path = options["image"][0]
        reference_path = options["reference"][0]
        output = options["output"][0]

        print(image_path)
        print(reference_path)

        image_bundle = _get_bundle_from_path(image_path)
        reference_bundle = _get_bundle_from_path(reference_path)

        #extents = harmonize_images([image_bundle.get_raster(), reference_bundle.get_raster()])
        #print extents
        #print extents['x_offset']
        #print extents['y_offset']

        shape = image_bundle.get_raster().get_attribute(raster.DATA_SHAPE)

        invariant_array = numpy.full((shape[0], shape[1]),
                                     INV_MASK_VALUE,
                                     dtype=int)

        in1 = reference_bundle.get_raster_file()
        in2 = image_bundle.get_raster_file()
        in_invar = create_filename(output, 'invariantPixelMask.tif')
        result = create_filename(output, 'crosscorrelation_next.tif')
        to_polar = create_filename(output, 'crosscorrelation_polar.tif')
        create_raster_from_reference(in_invar, invariant_array,
                                     image_bundle.get_raster_file(),
                                     gdal.GDT_Byte)

        local = LocalProcessLauncher()
        volume = '%s:%s' % (output, output)
        shell_array = [
            'docker', 'run', '--rm', '-v', volume, 'madmex/antares',
            'correlation', '-in1', in1, '-in2', in2, '-in_invar', in_invar,
            '-val_invar',
            '%s' % INV_MASK_VALUE, '-out', result, '-window_size',
            '%s' % WINDOW_SIZE, '-max_gap',
            '%s' % MAX_GAP
        ]
        shell_string = ' '.join(shell_array)

        print(shell_string)

        if not is_file(result):
            log = local.execute(shell_string)

        crosscorrelation = raster.Data(result, 'GTiff')

        print(crosscorrelation.get_attribute(raster.PROJECTION))
        print(crosscorrelation.get_attribute(raster.GEOTRANSFORM))

        #tile_map(result, result)

        correlation_array = crosscorrelation.read_data_file_as_array()

        band_0 = correlation_array[0, :]
        band_1 = correlation_array[1, :]

        phi_band = phi(band_0, band_1)
        rho_band = rho(band_0, band_1)

        correlation_array[0, :] = phi_band
        correlation_array[1, :] = rho_band

        # The polar raster must exist on disk before it can be read back.
        create_raster_from_reference(to_polar, correlation_array, result)

        crosscorrelation_polar = raster.Data(to_polar, 'GTiff')

        extents = harmonize_images(
            [crosscorrelation_polar,
             reference_bundle.get_raster()])
        x_offset = extents['x_offset'][1]
        y_offset = extents['y_offset'][1]
        x_tile_size = extents['x_range']
        y_tile_size = extents['y_range']
        aux_name = create_filename(output, 'auxiliar.tif')
        tile_map(reference_bundle.get_raster_file(), aux_name, x_tile_size,
                 y_tile_size, x_offset, y_offset)
        aux_array = raster.Data(aux_name, 'GTiff').read_data_file_as_array()
        crosscorrelation_polar_array = crosscorrelation_polar.read_data_file_as_array(
        )
        stats = calculate_statistics_qa(crosscorrelation_polar_array,
                                        aux_array, STAT_CLASSES, STAT_MIN,
                                        STAT_MAX, THRESHOLD_COD, THRESHOLD_LOG)
        decision = calculate_decision(stats['band_1']['histogram'],
                                      stats['band_1']['histogram_bins'])

        print(stats)

        quality = QualityAssessment(
            decision=decision,
            max=adapt_numpy_float(stats['band_1']['maximum']),
            min=adapt_numpy_float(stats['band_1']['minimum']),
            median=adapt_numpy_float(stats['band_1']['median']),
            mean=adapt_numpy_float(stats['band_1']['mean']),
            standard_deviation=adapt_numpy_float(stats['band_1']['std']),
            product_id=1,
            reference_id=2)
        persist_quality(quality)

        print(decision)
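
The phi and rho helpers are not shown in this snippet; judging from the code above they convert the two cross-correlation bands from Cartesian displacements into polar form. A minimal sketch of that assumption:

import numpy

def phi(band_0, band_1):
    # Angle component of the polar representation (assumed definition).
    return numpy.arctan2(band_1, band_0)

def rho(band_0, band_1):
    # Magnitude component of the polar representation (assumed definition).
    return numpy.hypot(band_0, band_1)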
Example #6
    def handle(self, **options):
        '''
        Segment each input scene, classify the resulting objects with every
        requested model, and write the classification to a new shapefile in
        the output directory.
        '''
        output = options['output'][0]
        models = options['modelname']
        model_directory = options['modeldir'][0]
        region = options['region'][0]

        start_time = time.time()

        for path in options['path']:
            print(path)

            scene_bundle = rapideye.Bundle(path)
            directory = getattr(SETTINGS, 'TEMPORARY')
            directory_helper = create_filename(directory, 'helper')
            create_directory_path(directory_helper)
            categories_file = create_filename(directory, 'categories.json')
            categories_dictionary = {
                0: "AGRICULTURA DE RIEGO",
                1: "AGRICULTURA DE TEMPORAL",
                2: "AGUA",
                3: "AREAS QUEMADAS",
                4: "ASENTAMIENTOS HUMANOS",
                5: "BOSQUE CULTIVADO",
                6: "BOSQUE DE AYARIN",
                7: "BOSQUE DE ENCINO",
                8: "BOSQUE DE ENCINO-PINO",
                9: "BOSQUE DE GALERIA",
                10: "BOSQUE DE MEZQUITE",
                11: "BOSQUE DE OYAMEL",
                12: "BOSQUE DE PINO",
                13: "BOSQUE DE PINO-ENCINO",
                14: "BOSQUE INDUCIDO",
                15: "BOSQUE MESOFILO DE MONTANA",
                16: "DESPROVISTO DE VEGETACION",
                17: "INDEFINIDO",
                18: "MANGLAR",
                19: "MATORRAL SUBTROPICAL",
                20: "MEZQUITAL",
                21: "NUBES",
                22: "PASTIZAL CULTIVADO",
                23: "PASTIZAL HALOFILO",
                24: "PASTIZAL INDUCIDO",
                25: "PASTIZAL NATURAL",
                26: "PRADERA DE ALTA MONTANA",
                27: "SABANOIDE",
                28: "SELVA ALTA PERENNIFOLIA",
                29: "SELVA ALTA SUBPERENNIFOLIA",
                30: "SELVA BAJA CADUCIFOLIA",
                31: "SELVA BAJA ESPINOSA CADUCIFOLIA",
                32: "SELVA BAJA SUBCADUCIFOLIA",
                33: "SELVA DE GALERIA",
                34: "SELVA MEDIANA CADUCIFOLIA",
                35: "SELVA MEDIANA SUBCADUCIFOLIA",
                36: "SELVA MEDIANA SUBPERENNIFOLIA",
                37: "SIN VEGETACION APARENTE",
                38: "SOMBRAS",
                39: "TULAR",
                40: "VEGETACION DE DUNAS COSTERAS",
                41: "VEGETACION HALOFILA HIDROFILA",
                42: "ZONA URBANA"
            }
            basename = get_basename(scene_bundle.get_raster_file())
            all_file = create_filename(directory_helper,
                                       '%s_all_features.tif' % basename)

            if not is_file(all_file):
                scene_bundle.get_feature_array(all_file)

            filename = get_basename(all_file)

            if not is_file(
                    create_filename(directory_helper, '%s.shp' % filename)):
                shell_string = 'docker run --rm -v %s:/data madmex/segment gdal-segment %s.tif -out helper/%s.shp -algo SLIC -region %s' % (
                    directory, filename, filename, region)
                launcher = LocalProcessLauncher()
                LOGGER.debug('Docker command: %s', shell_string)
                launcher.execute(shell_string)

            data = read_data_table(
                create_filename(directory_helper, '%s.shp' % filename))

            results = {}

            for model_name in models:
                persistence_directory = create_filename(
                    model_directory, model_name)
                print(model_name)
                model = load_model(model_name)
                model_instance = model.Model(persistence_directory)
                model_instance.load(persistence_directory)
                prediction = model_instance.predict(data)
                results[model_name] = prediction
            print(results)
            create_directory_path(output)
            write_results(
                create_filename(directory_helper, '%s.shp' % filename),
                create_filename(output,
                                '%s_classification.shp' % filename[0:32]),
                results, categories_dictionary)

        LOGGER.info("--- %s seconds ---" % (time.time() - start_time))
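
The handler follows the Django management-command pattern, so the options read above can also be supplied programmatically. A hedged sketch; the command name and all values are illustrative:

from django.core.management import call_command

call_command('classify',                 # hypothetical command name
             path=['/data/rapideye/scene_1'],
             modelname=['rf', 'svm'],
             modeldir=['/data/models'],
             region=['chiapas'],
             output=['/data/results'])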
Example #7
    def handle(self, **options):
        '''
        Build a training set from the given training shape and the feature
        rasters of the input scenes, then train each of the requested
        models on it.
        '''
        target_tag = 'DN'
        start_time_all = time.time()
        shape_name = options['shape'][0]
        raster_paths = options['path']
        destination = options['dest']
        models = options['model']
        dataframe_features = None
        temporary_directory = getattr(SETTINGS, 'TEMPORARY')
        create_directory_path(temporary_directory)
        # I read the training data in shape form
        training_shape = vector.Data(shape_name)
        training_dataframe = training_shape.to_dataframe()
        training_path = create_filename(temporary_directory,
                                        'training_raster.tif')
        categories_file = create_filename(temporary_directory,
                                          'categories.json')
        training_warped_path = create_filename(temporary_directory,
                                               'training_warped_raster.tif')
        pixel_size = 0.000462175996292

        if not is_file(training_warped_path):
            training_raster = vector_to_raster(
                training_shape, training_path, pixel_size, -pixel_size,
                ['ATTRIBUTE=OBJECTID', 'COMPRESS=LZW'])
            training_raster_warped = training_raster.reproject(
                training_warped_path, epgs=32617)
        else:
            training_raster_warped = raster.Data(training_warped_path)

        dem_file = getattr(SETTINGS, 'DEM')

        dem_raster = raster.Data(dem_file)
        print(dem_raster.get_spatial_reference())
        print('reprojecting raster')
        #dem_raster_warped = dem_raster.reproject(training_warped_path, epgs=32614)

        #training_raster_warped = raster.Data(training_path)

        aspect_file = getattr(SETTINGS, 'ASPECT')
        slope_file = getattr(SETTINGS, 'SLOPE')

        print(dem_file, aspect_file, slope_file)

        for raster_path in raster_paths:
            scene_bundle = rapideye.Bundle(raster_path)

            raster_mask = scene_bundle.get_raster()

            #example_path = create_filename(temporary_directory, 'mask')
            #create_directory_path(example_path)
            #raster_to_vector_mask(raster_mask, example_path)

            print(scene_bundle.get_raster_file())

            basename = get_basename(scene_bundle.get_raster_file())
            all_file = create_filename(temporary_directory,
                                       '%s_all_features.tif' % basename)
            # Do not recalculate if the file is already there.
            if is_file(all_file):
                features_raster = raster.Data(all_file)
            else:
                features_raster = scene_bundle.get_feature_array(all_file)
            new_df = get_dataframe_from_raster(features_raster,
                                               training_raster_warped)
            if new_df is not None:
                if dataframe_features is not None:
                    dataframe_features = pandas.concat([
                        dataframe_features,
                        get_dataframe_from_raster(features_raster,
                                                  training_raster_warped)
                    ])
                else:
                    dataframe_features = get_dataframe_from_raster(
                        features_raster, training_raster_warped)

        features_size = len(list(dataframe_features))

        training_set = dataframe_features.set_index(0).join(
            training_dataframe.set_index('OBJECTID'))

        print(training_set)

        training_set['target'] = pandas.Categorical(
            training_set[target_tag]).codes
        categories_array = pandas.Categorical(
            training_set[target_tag]).categories
        create_categories_file(categories_file, categories_array)
        training_set = training_set[training_set['target'] != -1]
        # features_size includes 0, which is the index of the feature.
        training_set_array = training_set[list(range(1, features_size))].values
        target_set_array = training_set.pop('target')

        print(training_set_array.shape)
        print(target_set_array.shape)

        X_train, X_test, y_train, y_test = train_test_split(training_set_array,
                                                            target_set_array,
                                                            train_size=0.8,
                                                            test_size=0.2)
        models_directory = create_filename(temporary_directory, 'models')
        create_directory_path(models_directory)

        for model_name in models:
            start_time = time.time()
            print(numpy.unique(y_train))
            train_model(X_train, X_test, y_train, y_test, models_directory,
                        model_name)
            print "--- %s seconds training %s model---" % (
                (time.time() - start_time), model_name)
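
As in the previous example, a hedged invocation sketch with a hypothetical command name; the option keys mirror the ones read in handle() and all values are illustrative:

from django.core.management import call_command

call_command('train',                    # hypothetical command name
             shape=['/data/training/areas.shp'],
             path=['/data/rapideye/scene_1', '/data/rapideye/scene_2'],
             dest='/data/output',
             model=['rf', 'svm'])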