Example #1
    def gdal_reproject(
        cls,
        src: Union[str, gdal.Dataset],
        output_file: str = None,  # The filepath of the output image to write to.
        src_srs: osr.SpatialReference = None,
        dst_srs: osr.SpatialReference = None,  # Defaults to epsg if None
        epsg: int = None,
        error_threshold: float = 0.125,
        resampling: gdalconst = gdalconst.GRA_NearestNeighbour
    ) -> Optional["GDALGrid"]:
        """ Reproject a raster image. """
        src_ds = cls.load_raster(src)[0]

        if dst_srs is None:
            dst_srs = osr.SpatialReference()
            dst_srs.ImportFromEPSG(int(epsg))

        dst_wkt = dst_srs.ExportToWkt()

        if not isinstance(resampling, int):
            resampling = getattr(gdal, resampling)

        src_wkt = None
        if src_srs is not None:
            src_wkt = src_srs.ExportToWkt()

        reprojected_ds = gdal.AutoCreateWarpedVRT(src_ds, src_wkt, dst_wkt,
                                                  resampling, error_threshold)
        if output_file:
            gdal.GetDriverByName('GTiff').CreateCopy(output_file,
                                                     reprojected_ds)
        return GDALGrid(reprojected_ds)
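
For comparison, a minimal sketch of the same reprojection done with gdal.Warp instead of gdal.AutoCreateWarpedVRT; the file names are placeholders and 'EPSG:3857' stands in for whatever dst_srs/epsg would be passed above.

from osgeo import gdal

# Warp straight to a GeoTIFF on disk; resampleAlg/errorThreshold mirror the
# resampling and error_threshold arguments of gdal_reproject() above.
warped_ds = gdal.Warp('reprojected.tif', 'input.tif',
                      dstSRS='EPSG:3857',
                      resampleAlg='near',
                      errorThreshold=0.125)
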
Example #2
    def write_esri_grid_ascii_file(path,
                                   data,
                                   extent,
                                   cellsize,
                                   nodata_value=-9999.0):
        xllcorner, yllcorner, *_ = extent.as_list()
        nrows, ncols = data.shape

        header_dict = OrderedDict()
        header_dict['nrows'] = nrows
        header_dict['ncols'] = ncols
        header_dict['xllcorner'] = xllcorner
        header_dict['yllcorner'] = yllcorner
        header_dict['cellsize'] = cellsize
        header_dict['nodata_value'] = nodata_value
        header = '\n'.join(
            ['{} {}'.format(key, val) for key, val in header_dict.items()])

        numpy.savetxt(path, data, header=header, comments='')

        # save projection info to adjacent file
        ref = SpatialReference()
        ref.ImportFromProj4(extent.projection.srs)
        with open(path.replace('asc', 'prj'), 'w') as f:
            f.write(ref.ExportToWkt())
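
A hedged usage sketch of write_esri_grid_ascii_file(); _StubExtent is a hypothetical stand-in exposing only the two things the function touches (as_list() and projection.srs), all values are placeholders, and the function's own module-level imports (numpy, OrderedDict, SpatialReference) are assumed.

import numpy

class _StubProjection:
    # proj4 string consumed by ImportFromProj4() above
    srs = '+proj=longlat +datum=WGS84 +no_defs'

class _StubExtent:
    projection = _StubProjection()

    def as_list(self):
        # only the first two entries (xllcorner, yllcorner) are used
        return [145.0, -38.0, 146.0, -37.0]

# writes grid.asc plus a grid.prj sidecar holding the projection WKT
write_esri_grid_ascii_file('grid.asc', numpy.zeros((10, 10)), _StubExtent(),
                           cellsize=0.1)
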
Example #3
    def reproject(self, dstSRS: osr.SpatialReference, **kwargs):
        resampling = kwargs.get('resampling', gdalconst.GRA_NearestNeighbour)
        nx = kwargs.get('nx', None)
        ny = kwargs.get('ny', None)
        resolution = kwargs.get('resolution', None)
        newbounds = self.bounds(as_projection=dstSRS)
        mem_drv = gdal.GetDriverByName('MEM')
        nBands = self.dataset.RasterCount
        if resolution is not None:
            nx, ny = int(round(
                (newbounds[1] - newbounds[0]) / resolution[0])), int(
                    round((newbounds[3] - newbounds[2]) / resolution[1]))
        elif nx is not None and ny is not None:
            resolution = [(newbounds[1] - newbounds[0]) / nx,
                          (newbounds[3] - newbounds[2]) / ny]
        dest: gdal.Dataset = mem_drv.Create('', nx, ny, nBands,
                                            gdal.GDT_Float32)
        new_geo = (newbounds[0], resolution[0], 0.0, newbounds[3], 0.0,
                   -resolution[1])
        srcWkt = self.wkt
        destWkt = dstSRS.ExportToWkt()
        dest.SetGeoTransform(new_geo)
        dest.SetProjection(destWkt)
        res = gdal.ReprojectImage(self.dataset, dest, srcWkt, destWkt,
                                  resampling)
        return GDALGrid(dest)
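
A hypothetical call of the method above, assuming grid is an instance of the surrounding GDALGrid-like class: warp it to Web Mercator at a 30 m target resolution with bilinear resampling.

from osgeo import osr, gdalconst

web_mercator = osr.SpatialReference()
web_mercator.ImportFromEPSG(3857)
warped = grid.reproject(web_mercator, resolution=(30.0, 30.0),
                        resampling=gdalconst.GRA_Bilinear)
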
Example #4
def get_utm_wkt(coordinate, from_wkt):
    '''
    Return the WKT of the UTM-zone CRS covering the specified coordinate.
    Used to transform coords to metres.
    @param coordinate: single (x, y) coordinate pair
    @param from_wkt: WKT of the CRS the coordinate is expressed in
    '''
    def utm_getZone(longitude):
        return (int(1 + (longitude + 180.0) / 6.0))

    def utm_isNorthern(latitude):
        if (latitude < 0.0):
            return 0
        else:
            return 1

    coordinate_array = np.array(coordinate).reshape((1, 2))

    latlon_coord_trans = get_coordinate_transformation(from_wkt, 'EPSG:4326')
    latlon_coord = coordinate if latlon_coord_trans is None else latlon_coord_trans.TransformPoints(
        coordinate_array)[0][0:2]

    # Set UTM coordinate reference system
    utm_spatial_ref = SpatialReference()
    utm_spatial_ref.SetWellKnownGeogCS('WGS84')
    utm_spatial_ref.SetUTM(utm_getZone(latlon_coord[0]),
                           utm_isNorthern(latlon_coord[1]))

    return utm_spatial_ref.ExportToWkt()
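
A standalone sketch of the core of get_utm_wkt() for a coordinate already in WGS84 lon/lat (so the transform helper is not exercised); the Sydney coordinate is only an illustrative value.

from osgeo.osr import SpatialReference

lon, lat = 151.21, -33.87                # roughly Sydney, UTM zone 56 south
zone = int(1 + (lon + 180.0) / 6.0)      # -> 56
utm = SpatialReference()
utm.SetWellKnownGeogCS('WGS84')
utm.SetUTM(zone, int(lat >= 0.0))        # second argument: 1 = northern hemisphere
print(utm.ExportToWkt())
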
Example #5
def reproj_convert_layer(geojson_path,
                         output_path,
                         file_format,
                         output_crs,
                         input_crs="epsg:4326"):
    layer_name = output_path.split('/')
    layer_name = layer_name[len(layer_name) - 1].split('.')[0]

    in_driver = GetDriverByName("GeoJSON")
    out_driver = GetDriverByName(file_format)

    inSpRef = SpatialReference()
    inSpRef.ImportFromEPSG(int(input_crs.split("epsg:")[1]))

    outSpRef = SpatialReference()
    ret_val = outSpRef.ImportFromProj4(output_crs)
    if not ret_val == 0:
        raise ValueError("Error when importing the output crs")

    coords_transform = CoordinateTransformation(inSpRef, outSpRef)

    f_in = in_driver.Open(geojson_path)
    input_layer = f_in.GetLayer()
    f_out = out_driver.CreateDataSource(output_path)
    output_layer = f_out.CreateLayer(layer_name, outSpRef)

    input_lyr_defn = input_layer.GetLayerDefn()
    for i in range(input_lyr_defn.GetFieldCount()):
        fieldDefn = input_lyr_defn.GetFieldDefn(i)
        output_layer.CreateField(fieldDefn)

    output_lyr_defn = output_layer.GetLayerDefn()

    for inFeature in input_layer:
        geom = inFeature.GetGeometryRef()
        outFeature = OgrFeature(output_lyr_defn)
        if geom:
            geom.Transform(coords_transform)
            outFeature.SetGeometry(geom)
        else:
            outFeature.SetGeometry(None)

        for i in range(output_lyr_defn.GetFieldCount()):
            outFeature.SetField(
                output_lyr_defn.GetFieldDefn(i).GetNameRef(),
                inFeature.GetField(i))
        output_layer.CreateFeature(outFeature)
        outFeature.Destroy()
        inFeature.Destroy()
    f_in.Destroy()
    f_out.Destroy()

    if "Shapefile" in file_format:
        outSpRef.MorphToESRI()
        with open(output_path.replace(".shp", ".prj"), 'w') as file_proj:
            file_proj.write(outSpRef.ExportToWkt())
        with open(output_path.replace(".shp", ".cpg"), "w") as encoding_file:
            encoding_file.write("ISO-8859-1")
    return 0
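
A hypothetical usage of reproj_convert_layer(): convert a GeoJSON layer to a Shapefile reprojected to ETRS89 / LAEA Europe, passed as a proj4 string (both paths are placeholders).

laea_proj4 = ('+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 '
              '+ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs')
reproj_convert_layer('communes.geojson', '/tmp/export/communes.shp',
                     'ESRI Shapefile', laea_proj4)
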
Example #6
def alt_downloader(tile_nr_x, tile_nr_y):

    # construct url
    url = www_folder + str(tile_nr_x) + '/' + str(tile_nr_y) + '.asc.zip'

    try:
        # create an in-memory driver and a dataset on it
        driver = gdal.GetDriverByName('MEM')
        ds = driver.Create('', TD['NCOLS'], TD['NROWS'], 1, gdal.GDT_Float32)

        # access the zip file
        zf = ZipFile(BytesIO(requests.get(url).content))

        # read the raster file as a list of lines
        lines = zf.open(zf.infolist()[0]).readlines()

        # NOTE:
        # in the following lines we use some properties that the .asc files on our server have:
        # - carriage returns are used to separate header items and rows
        # - the header is 6 lines, that is, there is a NO DATA value in line 6
        # - the data starts in line 7 (index 6)
        # these are not required by the standard, c.f.
        # http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#/ESRI_ASCII_raster_format/009t0000000z000000/
        # in particular, NO DATA is optional and carriage returns may be replaced by spaces

        # read the geo transform
        # as described at http://geoexamples.blogspot.com/2012/01/creating-files-in-ogr-and-gdal-with.html:
        #   geotransform = (left x-coordinate, x-cellsize, rotation, upper y-coordinate, rotation, y-cellsize)
        #   Xgeo = geotransform[0] + Xpixel*geotransform[1] + Yline*geotransform[2]
        #   Ygeo = geotransform[3] + Xpixel*geotransform[4] + Yline*geotransform[5]
        # the y-cellsize is negative because the geotransform origin is the upper-left corner

        geo_trafo = (float(lines[2].split()[1]), TD['CELLSIZE'], 0,
                     float(lines[3].split()[1]) + TD['CELLSIZE'] * TD['NROWS'],
                     0, -TD['CELLSIZE'])

        ds.SetGeoTransform(geo_trafo)

        # read and write the data to the dataset
        arr = list(map(lambda x: list(map(float, x.split())), lines[6:]))
        zf.close()
        band = ds.GetRasterBand(1)
        band.WriteArray(array(arr))

        # set the spatial reference system (probably not necessary)
        proj = SpatialReference()
        proj.SetWellKnownGeogCS("EPSG:31287")
        ds.SetProjection(proj.ExportToWkt())

        return ds

    except:
        return None
Example #7
def getWKT_PRJ(epsg_code):  # return the WKT for a .prj file based on the input EPSG code
    # as of 2019-04-16, spatialreference.org is down; use the GDAL API instead

    #wkt = urllib.urlopen("http://spatialreference.org/ref/epsg/{0}/prettywkt/".format(epsg_code))
    #remove_spaces = wkt.read().replace(" ","")
    #output = remove_spaces.replace("\n", "")

    from osgeo.osr import SpatialReference

    srs = SpatialReference()
    srs.ImportFromEPSG(epsg_code)
    outWKT = srs.ExportToWkt()
    return str(outWKT)
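
A hypothetical usage: write the returned WKT next to a shapefile as a .prj sidecar (the file name is a placeholder).

with open('points.prj', 'w') as prj_file:
    prj_file.write(getWKT_PRJ(4326))
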
Example #8
def get_identifier_int(crs):
    """
    Given a CRS, generate a stable, unique identifier for it of type 'int'. Eg: 2193
    """
    if isinstance(crs, str):
        crs = SpatialReference(crs)
    if not isinstance(crs, SpatialReference):
        raise RuntimeError(f"Unrecognised CRS: {crs}")
    auth_code = crs.GetAuthorityCode(None)
    if auth_code and auth_code.isdigit() and int(auth_code) > 0:
        return int(auth_code)
    # Stable code that fits easily in an int32 and won't collide with EPSG codes.
    return (hash(crs.ExportToWkt()) & 0xFFFFFFF) + 1000000
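
A hedged usage sketch: for a CRS that carries an EPSG authority code the function returns that code, and WKT strings are accepted as well; the hash-based fallback only applies when no usable authority code is present.

from osgeo.osr import SpatialReference

nzgd2000 = SpatialReference()
nzgd2000.ImportFromEPSG(2193)
print(get_identifier_int(nzgd2000))                # -> 2193 (authority code)
print(get_identifier_int(nzgd2000.ExportToWkt()))  # WKT string input works too
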
Example #9
def copy_into_wrapper(dest, gdal_file_info, geotransform):
    srs = SpatialReference()
    srs.ImportFromEPSG(3857)
    gdal_file_info.projection = srs.ExportToWkt()
    gdal_file_info.geotransform = geotransform

    gdal_file_info.ulx = geotransform[0]
    gdal_file_info.uly = geotransform[3]
    gdal_file_info.lrx = gdal_file_info.ulx + geotransform[
        1] * gdal_file_info.xsize
    gdal_file_info.lry = gdal_file_info.uly + geotransform[
        5] * gdal_file_info.ysize
    for band in range(1, gdal_file_info.bands + 1):
        gdal_file_info.copy_into(dest, band, band)
Example #10
def create_target_image(x_size,
                        y_size,
                        bbox,
                        res,
                        bands=3,
                        band_type=1,
                        t_name='out.tif',
                        t_format='GTiff'):
    srs = SpatialReference()
    srs.ImportFromEPSG(3857)
    left, bottom, right, top = bbox
    # XXX kai: catch AttributeError when the format is unsupported
    driver = gdal.GetDriverByName(t_format)
    dest = driver.Create(t_name, x_size, y_size, bands, band_type, [])

    dest.SetGeoTransform([left, res, 0, top, 0, -res])
    dest.SetProjection(srs.ExportToWkt())
    return dest
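
A hypothetical call of create_target_image(): a 512x512 single-band byte raster covering a 5.12 km square in EPSG:3857 at 10 m resolution (file name and bbox are placeholders).

from osgeo import gdal

dest = create_target_image(512, 512, (0.0, 0.0, 5120.0, 5120.0), res=10.0,
                           bands=1, band_type=gdal.GDT_Byte,
                           t_name='target.tif')
dest.FlushCache()
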
Example #11
    def write_tiff(self):
        # https://stackoverflow.com/questions/59821554/converting-vector-shp-to-raster-tiff-using-python-gdal-library
        # open the shapefile as an OGR data source.
        input_shp = ogr.Open(self.file)

        # getting layer information of shapefile.
        shp_layer = input_shp.GetLayer()  # type: Layer

        # get extent values to set size of output raster.
        x_min, x_max, y_min, y_max = shp_layer.GetExtent()

        # calculate size/resolution of the raster.
        pixel_size_x = float((x_max - x_min) / self.height)
        pixel_size_y = float((y_max - y_min) / self.width)

        # get the GeoTIFF driver by name.
        image_type = 'GTiff'
        driver = gdal.GetDriverByName(image_type)

        # create the new raster, passing the filename, pixel dimensions, number of bands and data type.
        new_raster = driver.Create(self.file_w_ext('tiff'), self.width,
                                   self.height, 1, gdal.GDT_Byte)

        # set the transform between pixel/raster space and projected coordinate space.
        new_raster.SetGeoTransform(
            (x_min, pixel_size_x, 0, y_min, 0, pixel_size_y))

        # get required raster band.
        band = new_raster.GetRasterBand(1)

        # assign no data value to empty cells.
        no_data_value = -9999
        band.SetNoDataValue(no_data_value)
        band.FlushCache()

        # main conversion method
        gdal.RasterizeLayer(new_raster, [1], shp_layer, burn_values=[255])

        # adding a spatial reference
        new_raster_sr = SpatialReference()
        new_raster_sr.ImportFromEPSG(2975)
        new_raster.SetProjection(new_raster_sr.ExportToWkt())

        return new_raster
Example #12
def create_tif(fp: str,
               extent: List[float],
               srs: osr.SpatialReference,
               pixel_size: int,
               num_bands: int = 1) -> gdal.Dataset:
    '''Returns a new .tif file'''
    logging.info(f'creating .tif : {fp}')
    driver = gdal.GetDriverByName('GTiff')

    # delete if exists
    if os.path.exists(fp):
        logging.info('deleting existing .tif')
        driver.Delete(fp)

    res = get_pixels(extent, pixel_size)

    # create tif
    ds = driver.Create(fp, *res, num_bands, gdal.GDT_UInt16)
    ds.SetGeoTransform((extent[0], pixel_size, 0, extent[3], 0, -pixel_size))
    ds.SetProjection(srs.ExportToWkt())

    return ds
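
A hypothetical usage of create_tif(); get_pixels() (not shown here) is assumed to return the raster size as (columns, rows), and the extent is assumed to be [xmin, ymin, xmax, ymax] based on how the function indexes it.

from osgeo import osr

utm33 = osr.SpatialReference()
utm33.ImportFromEPSG(32633)
ds = create_tif('scratch.tif', [500000.0, 4490000.0, 510000.0, 4500000.0],
                utm33, pixel_size=10)
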
Example #13
    def set_netcdf_metadata_attributes(
            self, to_crs='EPSG:4326', do_stats=False):
        '''
        Set all NetCDF metadata attributes, using self.METADATA_MAPPING to map each
        NetCDF ACDD global attribute name to a metadata path (e.g. an xpath).
        Parameters:
            to_crs: EPSG code or WKT for spatial metadata
            do_stats: Boolean flag indicating whether min/max stats should be determined (slow)
        '''
        assert self.METADATA_MAPPING, 'No metadata mapping defined'
        assert self._netcdf_dataset, 'NetCDF output dataset not defined.'
#        assert self._metadata_dict, 'No metadata acquired'

        # Set geospatial attributes
        try:
            grid_mapping = [variable.grid_mapping for variable in self._netcdf_dataset.variables.values(
            ) if hasattr(variable, 'grid_mapping')][0]
        except:
            logger.error(
                'Unable to determine grid_mapping for spatial reference')
            raise

        crs = self._netcdf_dataset.variables[grid_mapping]

        spatial_ref = crs.spatial_ref
        geoTransform = [float(string)
                        for string in crs.GeoTransform.strip().split(' ')]
        xpixels, ypixels = (
            dimension.size for dimension in self._netcdf_dataset.dimensions.values())
        dimension_names = (
            dimension.name for dimension in self._netcdf_dataset.dimensions.values())

        # Create nested list of bounding box corner coordinates
        bbox_corners = [[geoTransform[0] + (x_pixel_offset * geoTransform[1]) + (y_pixel_offset * geoTransform[2]),
                         geoTransform[3] + (x_pixel_offset * geoTransform[4]) + (y_pixel_offset * geoTransform[5])]
                        for x_pixel_offset in [0, xpixels]
                        for y_pixel_offset in [0, ypixels]]

        if to_crs:  # Coordinate transformation required
            from_spatial_ref = SpatialReference()
            from_spatial_ref.ImportFromWkt(spatial_ref)

            to_spatial_ref = SpatialReference()
            # Check for EPSG then Well Known Text
            epsg_match = re.match(r'^EPSG:(\d+)$', to_crs)
            if epsg_match:
                to_spatial_ref.ImportFromEPSG(int(epsg_match.group(1)))
            else:  # Assume valid WKT definition
                to_spatial_ref.ImportFromWkt(to_crs)

            coord_trans = CoordinateTransformation(
                from_spatial_ref, to_spatial_ref)

            extents = np.array(
                [coord[0:2] for coord in coord_trans.TransformPoints(bbox_corners)])
            spatial_ref = to_spatial_ref.ExportToWkt()

            centre_pixel_coords = [coord[0:2] for coord in coord_trans.TransformPoints(
                [[geoTransform[0] + (x_pixel_offset * geoTransform[1]) + (y_pixel_offset * geoTransform[2]),
                  geoTransform[3] + (x_pixel_offset * geoTransform[4]) + (y_pixel_offset * geoTransform[5])]
                 for x_pixel_offset in [xpixels // 2, xpixels // 2 + 1]
                 for y_pixel_offset in [ypixels // 2, ypixels // 2 + 1]]
            )
            ]

            # Use Pythagoras to compute centre pixel size in new coordinates
            # (never mind the angles)
            yres = pow(pow(centre_pixel_coords[1][0] - centre_pixel_coords[0][0], 2) + pow(
                centre_pixel_coords[1][1] - centre_pixel_coords[0][1], 2), 0.5)
            xres = pow(pow(centre_pixel_coords[2][0] - centre_pixel_coords[0][0], 2) + pow(
                centre_pixel_coords[2][1] - centre_pixel_coords[0][1], 2), 0.5)

            # TODO: Make this more robust - could pull single unit from WKT
            if to_spatial_ref.IsGeographic():
                xunits, yunits = ('degrees_east', 'degrees_north')
            elif to_spatial_ref.IsProjected():
                xunits, yunits = ('m', 'm')
            else:
                xunits, yunits = ('unknown', 'unknown')

        else:  # Use native coordinates
            extents = np.array(bbox_corners)
            xres = round(geoTransform[1], Geophys2NetCDF.DECIMAL_PLACES)
            yres = round(geoTransform[5], Geophys2NetCDF.DECIMAL_PLACES)
            xunits, yunits = (self._netcdf_dataset.variables[
                              dimension_name].units for dimension_name in dimension_names)

        xmin = np.min(extents[:, 0])
        ymin = np.min(extents[:, 1])
        xmax = np.max(extents[:, 0])
        ymax = np.max(extents[:, 1])

        attribute_dict = dict(zip(['geospatial_lon_min', 'geospatial_lat_min', 'geospatial_lon_max', 'geospatial_lat_max'],
                                  [xmin, ymin, xmax, ymax]
                                  )
                              )
        attribute_dict['geospatial_lon_resolution'] = xres
        attribute_dict['geospatial_lat_resolution'] = yres
        attribute_dict['geospatial_lon_units'] = xunits
        attribute_dict['geospatial_lat_units'] = yunits

        try:
            convex_hull = [coordinate[0:2] for coordinate in coord_trans.TransformPoints(
                netcdf2convex_hull(self.netcdf_dataset, 2000000000))]  # Process dataset in pieces <= 2GB in size
        except:
            logger.info('Unable to compute convex hull. Using rectangular bounding box instead.')
            convex_hull = [coordinate[0:2] for coordinate in coord_trans.TransformPoints(bbox_corners + [bbox_corners[0]])]

        attribute_dict['geospatial_bounds'] = 'POLYGON((' + ', '.join([' '.join(
            ['%.4f' % ordinate for ordinate in coordinates]) for coordinates in convex_hull]) + '))'

        attribute_dict['geospatial_bounds_crs'] = spatial_ref

        for key, value in attribute_dict.items():
            setattr(self._netcdf_dataset, key, value)

        # Set attributes defined in self.METADATA_MAPPING
        # Scan list in reverse to give priority to earlier entries
        #TODO: Improve this coding - it's a bit crap
        keys_read = []
        for key, metadata_path in self.METADATA_MAPPING:
            # Skip any keys already read
            if key in keys_read:
                continue

            value = self.get_metadata(metadata_path)
            if value is not None:
                logger.debug('Setting %s to %s', key, value)
                # TODO: Check whether hierarchical metadata required
                setattr(self._netcdf_dataset, key, value)
                keys_read.append(key)
            else:
                logger.warning(
                    'WARNING: Metadata path %s not found', metadata_path)

        unread_keys = sorted(
            list(set([item[0] for item in self.METADATA_MAPPING]) - set(keys_read)))
        if unread_keys:
            logger.warning(
                'WARNING: No value found for metadata attribute(s) %s' % ', '.join(unread_keys))

        # Ensure only one DOI is stored - could be multiple, comma-separated
        # entries
        if hasattr(self._netcdf_dataset, 'doi'):
            url_list = [url.strip()
                        for url in self._netcdf_dataset.doi.split(',')]
            doi_list = [url for url in url_list if url.startswith(
                'http://dx.doi.org/')]
            if len(url_list) > 1:  # If more than one URL in list
                try:  # Give preference to proper DOI URL
                    url = doi_list[0]  # Use first (preferably only) DOI URL
                except:
                    url = url_list[0]  # Just use first URL if no DOI found
                url = url.replace('&amp;', '&')
                self._netcdf_dataset.doi = url

        # Set metadata_link to NCI metadata URL
        self._netcdf_dataset.metadata_link = 'https://pid.nci.org.au/dataset/%s' % self.uuid

        self._netcdf_dataset.Conventions = 'CF-1.6, ACDD-1.3'

        if do_stats:
            datastats = DataStats(netcdf_dataset=self.netcdf_dataset,
                                  netcdf_path=None, max_bytes=2000000000)  # 2GB pieces
            datastats.data_variable.actual_range = np.array(
                [datastats.value('min'), datastats.value('max')], dtype='float32')

        # Remove old fields - remove this later
        if hasattr(self._netcdf_dataset, 'id'):
            del self._netcdf_dataset.id
        if hasattr(self._netcdf_dataset, 'ga_uuid'):
            del self._netcdf_dataset.ga_uuid
        if hasattr(self._netcdf_dataset, 'keywords_vocabulary'):
            del self._netcdf_dataset.keywords_vocabulary
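
A small worked sketch of the bounding-box corner computation used above, with a made-up GeoTransform (origin at longitude 140, latitude -10, with 0.001-degree pixels).

gt = [140.0, 0.001, 0.0, -10.0, 0.0, -0.001]
xpixels, ypixels = 4000, 3000
corners = [[gt[0] + x * gt[1] + y * gt[2],
            gt[3] + x * gt[4] + y * gt[5]]
           for x in (0, xpixels)
           for y in (0, ypixels)]
# -> [[140.0, -10.0], [140.0, -13.0], [144.0, -10.0], [144.0, -13.0]]
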
Example #14
    def srs(self):
        sr = SpatialReference()
        sr.ImportFromEPSG(self.srsid)
        return sr.ExportToWkt()
Example #15
import numpy as np
import unittest
from geometryIO import proj4LL
from mock import patch
from numpy import random
from osgeo.osr import SpatialReference

from ..libraries.satellite_image import ProjectedCalibration
from ..libraries.satellite_image import MetricCalibration
from ..libraries.satellite_image import SatelliteImage

LIBRARY_ROUTE = 'count_buildings.libraries.satellite_image'
spatial_reference = SpatialReference()
spatial_reference.ImportFromEPSG(4326)
WKT = spatial_reference.ExportToWkt()
PIXEL_FRAME = (0, 0), (10, 10)
CALIBRATION_PACK = 0, 0.5, 0, 0, 0, -0.5


class ProjectedCalibrationTest(unittest.TestCase):
    def setUp(self):
        self.calibration_pack = 1, 2, 3, 4, 5, 6
        self.calibration = ProjectedCalibration(self.calibration_pack)

    def test_to_projected_xy(self):
        old_projected_xy = random.random(2)
        new_projected_xy = self.calibration.to_projected_xy(
            self.calibration.to_pixel_xy(old_projected_xy))
        self.assert_((old_projected_xy - new_projected_xy < 0.0000001).all())

Example #16
    def srs(cls):
        sr = SpatialReference()
        sr.ImportFromEPSG(cls.srsid())
        return sr.ExportToWkt()
Example #17
def reproj_convert_layer(geojson_path, output_path, file_format, output_crs):
    """
    Convert GeoJSON to GML or Shapefile and write it to disk.

    Convert a GeoJSON FeatureCollection to GML or ESRI Shapefile format and
    reproject the geometries if needed (used when the user requests an export).

    Parameters
    ----------
    geojson_path: str
        Path of the input GeoJSON FeatureCollection to be converted.

    output_path: str
        Path for the resulting Shapefile/GML (should be in a directory
        created by tempfile.TemporaryDirectory).

    file_format: str
        The format of the expected result ('ESRI Shapefile' or 'GML' is expected).

    output_crs: str
        The output srs to use (in proj4 string format).

    Returns
    -------
    result_code: int
        Should return 0 if everything went fine.
    """
    ## TODO : Use VectorTranslate to make the conversion?
    input_crs = "epsg:4326"
    layer_name = output_path.split('/')
    layer_name = layer_name[len(layer_name) - 1].split('.')[0]

    in_driver = GetDriverByName("GeoJSON")
    out_driver = GetDriverByName(file_format)

    inSpRef = SpatialReference()
    inSpRef.ImportFromEPSG(int(input_crs.split("epsg:")[1]))

    outSpRef = SpatialReference()
    ret_val = outSpRef.ImportFromProj4(output_crs)
    if not ret_val == 0:
        raise ValueError("Error when importing the output crs")

    coords_transform = CoordinateTransformation(inSpRef, outSpRef)

    f_in = in_driver.Open(geojson_path)
    input_layer = f_in.GetLayer()
    f_out = out_driver.CreateDataSource(output_path)
    output_layer = f_out.CreateLayer(layer_name, outSpRef)

    input_lyr_defn = input_layer.GetLayerDefn()
    for i in range(input_lyr_defn.GetFieldCount()):
        fieldDefn = input_lyr_defn.GetFieldDefn(i)
        output_layer.CreateField(fieldDefn)

    output_lyr_defn = output_layer.GetLayerDefn()

    for inFeature in input_layer:
        geom = inFeature.GetGeometryRef()
        outFeature = OgrFeature(output_lyr_defn)
        if geom:
            geom.Transform(coords_transform)
            outFeature.SetGeometry(geom)
        else:
            outFeature.SetGeometry(None)

        for i in range(output_lyr_defn.GetFieldCount()):
            outFeature.SetField(
                output_lyr_defn.GetFieldDefn(i).GetNameRef(),
                inFeature.GetField(i))
        output_layer.CreateFeature(outFeature)
        outFeature.Destroy()
        inFeature.Destroy()
    f_in.Destroy()
    f_out.Destroy()

    if "Shapefile" in file_format:
        outSpRef.MorphToESRI()
        with open(output_path.replace(".shp", ".prj"), 'w') as file_proj:
            file_proj.write(outSpRef.ExportToWkt())
        with open(output_path.replace(".shp", ".cpg"), "w") as encoding_file:
            encoding_file.write("ISO-8859-1")
    return 0
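
A hypothetical usage of the GML branch; here the output CRS is a world Robinson projection given as a proj4 string (both paths are placeholders).

robinson_proj4 = ('+proj=robin +lon_0=0 +x_0=0 +y_0=0 '
                  '+datum=WGS84 +units=m +no_defs')
reproj_convert_layer('regions.geojson', '/tmp/export/regions.gml', 'GML',
                     robinson_proj4)
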