Example No. 1
    def test_get_lat_lon(self):
        from utilities.nctools import get_lat_lon

        pixelwidth = 0.045
        pixelheight = 0.055

        extent = [145.00, -13.00, 146.00, -14.00]

        expect_lat = [
            -13.0275, -13.0825, -13.1375, -13.1925, -13.2475, -13.3025,
            -13.3575, -13.4125, -13.4675, -13.5225, -13.5775, -13.6325,
            -13.6875, -13.7425, -13.7975, -13.8525, -13.9075, -13.9625
        ]

        expect_lon = [
            145.0225, 145.0675, 145.1125, 145.1575, 145.2025, 145.2475,
            145.2925, 145.3375, 145.3825, 145.4275, 145.4725, 145.5175,
            145.5625, 145.6075, 145.6525, 145.6975, 145.7425, 145.7875,
            145.8325, 145.8775, 145.9225, 145.9675
        ]

        lon, lat = get_lat_lon(extent, pixelwidth, pixelheight)

        assert_almost_equal(expect_lon, lon, decimal=2)
        assert_almost_equal(expect_lat, lat, decimal=2)
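A minimal sketch of a get_lat_lon that would satisfy this test, assuming the extent is ordered [min_lon, max_lat, max_lon, min_lat] and that cell-centre coordinates are returned; the actual utilities.nctools implementation may differ.

import numpy as np

def get_lat_lon(extent, pixelwidth, pixelheight):
    # cell-centre coordinates, starting half a pixel inside the extent
    min_lon, max_lat, max_lon, min_lat = extent
    lon = np.arange(min_lon + pixelwidth / 2.0, max_lon, pixelwidth)
    lat = np.arange(max_lat - pixelheight / 2.0, min_lat, -pixelheight)
    return lon, lat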
Example No. 2
def convo_combine(ms_orig, slope_array, aspect_array):
    """
    Apply convolution to the orginal shielding factor for each direction and
    call the :term:`combine` module to consider the slope and aspect and remove
    conservitism to get final shielding multiplier values

    :param ms_orig: `file` the original shidelding factor map
    :param slope_array: :class:`numpy.ndarray` the input slope values
    :param aspect_array_reclassify: :class:`numpy.ndarray` input aspect values

    """
    ms_orig_ds = gdal.Open(ms_orig)
    if ms_orig_ds is None:
        log.info('Could not open {0}'.format(ms_orig))
        sys.exit(1)

    # get image size, format, projection
    cols = ms_orig_ds.RasterXSize
    rows = ms_orig_ds.RasterYSize
    geotransform = ms_orig_ds.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(x_left, y_upper, pixelwidth, pixelheight, cols, rows)

    band = ms_orig_ds.GetRasterBand(1)
    data = band.ReadAsArray(0, 0, cols, rows)

    x_m_array, y_m_array = get_pixel_size_grids(ms_orig_ds)
    pixelwidth = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    log.info('pixelwidth is {0}'.format(pixelwidth))

    ms_folder = os.path.dirname(ms_orig)
    nc_folder = pjoin(ms_folder, 'netcdf')
    file_name = os.path.basename(ms_orig)

    dire = ['w', 'e', 'n', 's', 'nw', 'ne', 'se', 'sw']

    for one_dir in dire:

        log.info(one_dir)
        
        kernel_size = int(100.0 / pixelwidth)

        log.info('convolution kernel size is {0}'.format(str(kernel_size)))

        # if the resolution is coarser than 100 m, skip the convolution and
        # just copy the initial shielding factor to each direction
        if kernel_size > 0:
            outdata = np.zeros((rows, cols), np.float32)

            kern_dir = globals()['kern_' + one_dir]
            mask = kern_dir(kernel_size)
            outdata = blur_image(data, mask)
        else:
            outdata = data

        result = combine(outdata, slope_array, aspect_array, one_dir)
        del outdata

        # output format as netCDF4
        tile_nc = pjoin(
            nc_folder,
            os.path.splitext(file_name)[0] +
            '_' +
            one_dir +
            '.nc')
        log.info("Saving terrain multiplier in netCDF file")
        save_multiplier('Ms', result, lat, lon, tile_nc)

        del result

    ms_orig_ds = None
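The kernels looked up via globals()['kern_' + one_dir] are defined elsewhere in the package. As a hypothetical illustration only (the real kern_* builders may differ), a west-direction kernel could average the cells lying upwind to the west of each pixel:

import numpy as np

def kern_w(size):
    # hypothetical west kernel: average the `size` cells immediately to the
    # left (west) of the centre pixel
    kernel = np.zeros((2 * size + 1, 2 * size + 1))
    kernel[size, :size] = 1.0
    return kernel / kernel.sum()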
Example No. 3
def terrain(temp_tile, tile_extents_nobuffer):
    """
    Performs core calculations to derive the terrain multiplier

    :param temp_tile: `file` the image file of the input tile of the land cover
    :param tile_extents_nobuffer: `tuple` the input tile extent without buffer

    """

    # open the tile
    temp_dataset = gdal.Open(temp_tile)

    # get image size, format, projection
    cols = temp_dataset.RasterXSize
    rows = temp_dataset.RasterYSize
    bands = temp_dataset.RasterCount
    log.info('Input raster format is {0} / {1}'.format(
        temp_dataset.GetDriver().ShortName, temp_dataset.GetDriver().LongName))
    log.info('Image size is {0} x {1} x {2}'.format(cols, rows, bands))

    # get georeference info
    geotransform = temp_dataset.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    # get the tile's longitude and latitude values used to save output in
    # netcdf
    lon, lat = get_lat_lon(tile_extents_nobuffer, pixelwidth, pixelheight)

    # get the average grid size in metre of the tile
    x_m_array, y_m_array = get_pixel_size_grids(temp_dataset)
    gridwidth = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))
    log.info('gridwidth is {0}'.format(gridwidth))

    # produce the original terrain multiplier from the input terrain map
    log.info(
        'Reclassify the terrain classes into initial terrain multipliers ...')
    band = temp_dataset.GetRasterBand(1)
    data = band.ReadAsArray(0, 0, cols, rows)

    mz_init = get_terrain_table()
    reclassified_array = terrain_class2mz_orig(data, mz_init)

    # a value of 0 is nodata; if the whole tile is zero, the tile is empty
    if np.max(reclassified_array) == 0:
        log.info('Terrain dataset is all zeros. Terrain classification '
                 'will be skipped')
        return
    else:
        reclassified_array[reclassified_array == 0] = np.nan

    # assign nodata area as water with multiplier value 1
    mask = np.isnan(reclassified_array)
    reclassified_array[mask] = 1.0

    # convolution of the original terrain multiplier into different directions
    log.info('Moving average for each direction ...')
    dire = ['w', 'e', 'n', 's', 'nw', 'ne', 'se', 'sw']

    # set the average and lag distances used for convolution as per
    # AS/NZS 1170.2 (2011) including amendments
    avg_dist = 500.
    lag_dist = 200.

    for one_dir in dire:
        log.info(one_dir)
        if one_dir in ['w', 'e', 'n', 's']:
            avg_width = int(np.around(avg_dist / gridwidth))
            lag_width = int(np.around(lag_dist / gridwidth))
        else:
            # for the diagonal directions, avg_width is the x (and y) component
            # of the diagonal averaging distance (avg_dist); lag_width follows
            # the same principle
            avg_width = int(avg_dist / (gridwidth * 1.414))
            lag_width = int(lag_dist / (gridwidth * 1.414))

        # if the tile is smaller than the lag distance, no convolution
        if lag_width > reclassified_array.shape[0]:
            outdata = reclassified_array
        else:
            # if the tile is smaller than the upwind buffer, the whole tile
            # lies within the buffer
            if (avg_width + lag_width) > reclassified_array.shape[0]:
                avg_width = reclassified_array.shape[0] - lag_width

            log.info('convolution average width ' + str(avg_width))
            outdata = convo(one_dir, reclassified_array, avg_width, lag_width)
            outdata[mask] = np.nan

        # find output folder
        tile_folder = os.path.dirname(temp_tile)
        file_name = os.path.basename(temp_tile)

        # output format as netCDF4
        mz_folder = pjoin(tile_folder, 'terrain')

        tile_nc = pjoin(
            mz_folder,
            os.path.splitext(file_name)[0] +
            '_mz_' +
            one_dir +
            '.nc')
        log.info("Saving terrain multiplier in netCDF file")

        outdata_nobuffer = clip_array(outdata, x_left, y_upper, pixelwidth,
                                      pixelheight, tile_extents_nobuffer)

        save_multiplier('Mz', outdata_nobuffer, lat, lon, tile_nc)

        del outdata

    temp_dataset = None

    log.info(
        'Finished terrain multiplier computation for this tile successfully')
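clip_array, used above to trim the buffered tile back to tile_extents_nobuffer, is defined elsewhere. A minimal sketch, under the assumption that the extent is ordered [min_lon, max_lat, max_lon, min_lat] as in the test of Example No. 1; the package's own implementation may differ.

import numpy as np

def clip_array(data, x_left, y_upper, pixelwidth, pixelheight, extent):
    # y_upper holds the negated top latitude, so undo the sign first
    min_lon, max_lat, max_lon, min_lat = extent
    top_lat = -y_upper
    col_start = int(np.around((min_lon - x_left) / pixelwidth))
    row_start = int(np.around((top_lat - max_lat) / pixelheight))
    ncols = int(np.around((max_lon - min_lon) / pixelwidth))
    nrows = int(np.around((max_lat - min_lat) / pixelheight))
    return data[row_start:row_start + nrows, col_start:col_start + ncols]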
Example No. 4
def convo_combine(ms_orig, slope_array, aspect_array, tile_extents_nobuffer):
    """
    Apply convolution to the orginal shielding factor for each direction and
    call the :term:`combine` module to consider the slope and aspect and remove
    conservitism to get final shielding multiplier values

    :param ms_orig: `file` the original shidelding factor map
    :param slope_array: :class:`numpy.ndarray` the input slope values
    :param aspect_array: :class:`numpy.ndarray` the input aspect values
    :param tile_extents_nobuffer: `tuple` the input tile extent without buffer

    """

    ms_orig_ds = gdal.Open(ms_orig)
    if ms_orig_ds is None:
        log.info('Could not open ' + ms_orig)
        sys.exit(1)

    log.info('ms_orig is {0}'.format(ms_orig))

    # get image size, format, projection
    cols = ms_orig_ds.RasterXSize
    rows = ms_orig_ds.RasterYSize
    geotransform = ms_orig_ds.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(tile_extents_nobuffer, pixelwidth, pixelheight)

    band = ms_orig_ds.GetRasterBand(1)
    data = band.ReadAsArray(0, 0, cols, rows)

    x_m_array, y_m_array = get_pixel_size_grids(ms_orig_ds)
    gridwidth = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    log.info('gridwidth is {0}'.format(gridwidth))

    ms_folder = os.path.dirname(ms_orig)
    file_name = os.path.basename(ms_orig)

    dire = ['w', 'e', 'n', 's', 'nw', 'ne', 'se', 'sw']

    for one_dir in dire:

        log.info(one_dir)

        kernel_size = int(100.0 / gridwidth)

        log.info('convolution kernel size is {0}'.format(str(kernel_size)))

        # if the resolution is coarser than 100 m, skip the convolution and
        # just copy the initial shielding factor to each direction
        if kernel_size > 0:
            outdata = np.zeros((rows, cols), np.float32)

            kern_dir = globals()['kern_' + one_dir]
            mask = kern_dir(kernel_size)
            outdata = blur_image(data, mask)
        else:
            outdata = data

        result = combine(outdata, slope_array, aspect_array, one_dir)
        del outdata

        # output format as netCDF4
        tile_nc = pjoin(ms_folder,
                        os.path.splitext(file_name)[0] + '_' + one_dir + '.nc')
        log.info("Saving shielding multiplier in netCDF file")

        result_nobuffer = clip_array(result, x_left, y_upper, pixelwidth,
                                     pixelheight, tile_extents_nobuffer)

        save_multiplier('Ms', result_nobuffer, lat, lon, tile_nc)

        del result

    ms_orig_ds = None

    os.remove(ms_orig)
    os.chdir(ms_folder)
    filelist = glob.glob('*.xml')

    log.debug("useless xml files: {0}".format(repr(filelist)))

    if len(filelist) != 0:
        for f in filelist:
            try:
                os.remove(f)
            except OSError:
                pass
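blur_image is also external to these examples. A minimal sketch (an assumption, not the package's implementation) that treats it as a two-dimensional convolution of the shielding data with the directional kernel, keeping the output the same shape as the input:

from scipy import signal

def blur_image(data, kernel):
    # 'same' keeps the output shape equal to data; 'symm' mirrors the edges
    return signal.convolve2d(data, kernel, mode='same', boundary='symm')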
Example No. 5
def topomult(input_dem, tile_extents_nobuffer):
    """
    Executes core topographic multiplier functionality

    :param input_dem: `file` the input tile of the DEM
    :param tile_extents_nobuffer: `tuple` the input tile extent without buffer
    """

    # find output folder
    mh_folder = pjoin(os.path.dirname(input_dem), 'topographic')
    file_name = os.path.basename(input_dem)

    ds = gdal.Open(input_dem)
    nc = ds.RasterXSize
    nr = ds.RasterYSize

    geotransform = ds.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(tile_extents_nobuffer, pixelwidth, pixelheight)

    band = ds.GetRasterBand(1)
    elevation_array = band.ReadAsArray(0, 0, nc, nr)

    nodata_value = band.GetNoDataValue()
    if nodata_value is not None:
        elevation_array[np.where(elevation_array == nodata_value)] = np.nan
    else:
        elevation_array[np.where(elevation_array is None)] = np.nan

    elevation_array_tran = np.transpose(elevation_array)
    data = elevation_array_tran.flatten()

    x_m_array, y_m_array = get_pixel_size_grids(ds)
    cellsize = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    # Compute the starting positions along the boundaries depending on dir
    # Together, the direction and the starting position determines a line.
    # Note that the starting positions are defined
    # in terms of the 1-d index of the array.

    directions = ['n', 's', 'e', 'w', 'ne', 'nw', 'se', 'sw']

    for direction in directions:
        log.info(direction)

        if len(direction) == 2:
            data_spacing = cellsize * math.sqrt(2)
        else:
            data_spacing = cellsize

        mhdata = np.ones(data.shape)

        strt_idx = []
        if direction.find('n') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr * nc, nr)))
        if direction.find('s') >= 0:
            strt_idx = np.append(strt_idx, list(range(nr - 1, nr * nc, nr)))
        if direction.find('e') >= 0:
            strt_idx = np.append(strt_idx, list(range((nc - 1) * nr, nr * nc)))
        if direction.find('w') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr)))

        # For the diagonal directions the corner will have been counted twice
        # so get rid of the duplicates then loop over the data lines
        # (i.e. over the starting positions)
        strt_idx = np.unique(strt_idx)

        for ctr, idx in enumerate(strt_idx):
            log.debug('Processing path %3i of %3i, index %5i.',
                      ctr, len(strt_idx), idx)

            # Get a line of the data
            # path is a 1-d vector which gives the indices of the data
            path = make_path.make_path(nr, nc, idx, direction)
            line = data[path]
            line[np.isnan(line)] = 0.
            m = multiplier_calc.multiplier_calc(line, data_spacing)

            # write the line back to the data array
            m = np.transpose(m)
            mhdata[path] = m[0, ].flatten()

        # Reshape the result to matrix like
        mhdata = np.reshape(mhdata, (nc, nr))
        mhdata = np.transpose(mhdata)

        # Remove the conservatism as described in the Reference
        mhdata = remove_conservatism(mhdata)

        # consider the Tasmania factor
        if x_left > 143.0 and y_upper > 40.0:
            mhdata = tasmania(mhdata, elevation_array)

        # smooth
        g = np.ones((3, 3)) / 9.
        mhsmooth = signal.convolve(mhdata, g, mode='same')
        mhsmooth[np.isnan(elevation_array)] = np.nan
        del mhdata

        # output format as netCDF4
        tile_nc = pjoin(
            mh_folder,
            os.path.splitext(file_name)[0][:-4] + '_mt_' + direction + '.nc')

        mhsmooth_nobuffer = clip_array(mhsmooth, x_left, y_upper, pixelwidth,
                                       pixelheight, tile_extents_nobuffer)

        save_multiplier('Mt', mhsmooth_nobuffer, lat, lon, tile_nc)
        del mhsmooth

        log.info('Finished direction {0}'.format(direction))

    ds = None
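The starting-index arithmetic above relies on the column-major flattening of the transposed elevation array, so a flat index i corresponds to row i % nr and column i // nr of the original grid. A small worked check with hypothetical dimensions:

nr, nc = 3, 4                                    # hypothetical 3-row, 4-column tile
north_edge = list(range(0, nr * nc, nr))         # [0, 3, 6, 9]  -> row 0 of every column
south_edge = list(range(nr - 1, nr * nc, nr))    # [2, 5, 8, 11] -> last row of every column
east_edge = list(range((nc - 1) * nr, nr * nc))  # [9, 10, 11]   -> every row of the last column
west_edge = list(range(0, nr))                   # [0, 1, 2]     -> every row of the first column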
Example No. 6
def terrain(cyclone_area, temp_tile):
    """
    Performs core calculations to derive the terrain multiplier

    :param cyclone_area: None or `file` the input tile of the cyclone area
    :param temp_tile: `file` the image file of the input tile of the land cover

    """

    # open the tile
    temp_dataset = gdal.Open(temp_tile)

    # get image size, format, projection
    cols = temp_dataset.RasterXSize
    rows = temp_dataset.RasterYSize
    bands = temp_dataset.RasterCount
    log.info('Input raster format is {0} / {1}'.format(
        temp_dataset.GetDriver().ShortName, temp_dataset.GetDriver().LongName))
    log.info('Image size is {0} x {1} x {2}'.format(cols, rows, bands))

    # get georeference info
    geotransform = temp_dataset.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    # get the tile's longitude and latitude values used to save output in
    # netcdf
    lon, lat = get_lat_lon(x_left, y_upper, pixelwidth, pixelheight, cols, rows)

    # get the average grid size in metre of the tile
    x_m_array, y_m_array = get_pixel_size_grids(temp_dataset)
    pixelwidth = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))
    log.info('pixelwidth is {0}'.format(pixelwidth))

    # produce the original terrain multiplier from the input terrain map
    log.info(
        'Reclassify the terrain classes into initial terrain multipliers ...')
    band = temp_dataset.GetRasterBand(1)
    data = band.ReadAsArray(0, 0, cols, rows)
    reclassified_array = terrain_class2mz_orig(cyclone_area, data)
    # if the value is 0, it is nodata
    reclassified_array[reclassified_array == 0] = np.nan

    # assign the nodata area the average non-cyclonic multiplier value
    mask = np.isnan(reclassified_array)
    reclassified_array[mask] = 0.931

    # convolution of the original terrain multiplier into different directions
    log.info('Moving average for each direction ...')
    dire = ['w', 'e', 'n', 's', 'nw', 'ne', 'se', 'sw']

    # set the terrain buffer used for convolution as per AS/NZS 1170.2 (2011)
    terrain_buffer = 1000.

    for one_dir in dire:
        log.info(one_dir)
        if one_dir in ['w', 'e', 'n', 's']:
            filter_width = int(terrain_buffer / pixelwidth)
        else:
            filter_width = int(terrain_buffer / (pixelwidth * 1.414))

        # if the tile is smaller than the upwind buffer, the whole tile lies
        # within the buffer
        if filter_width > reclassified_array.shape[0]:
            filter_width = reclassified_array.shape[0]

        log.info('convolution filter width ' + str(filter_width))

        convo_dir = globals()['convo_' + one_dir]
        outdata = convo_dir(reclassified_array, filter_width)
        outdata[mask] = np.nan

        # find output folder
        tile_folder = os.path.dirname(temp_tile)
        file_name = os.path.basename(temp_tile)

        # output format as netCDF4
        nc_folder = pjoin(pjoin(tile_folder, 'terrain'), 'netcdf')
        tile_nc = pjoin(
            nc_folder,
            os.path.splitext(file_name)[0] +
            '_mz_' +
            one_dir +
            '.nc')
        log.info("Saving terrain multiplier in netCDF file")
        save_multiplier('Mz', outdata, lat, lon, tile_nc)

        del outdata

    temp_dataset = None

    log.info(
        'Finished terrain multiplier computation for this tile successfully')
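To make the 1.414 factor in the filter-width calculation above concrete, a worked example with hypothetical numbers: on a 25 m grid the 1000 m terrain buffer spans 40 cells along a cardinal direction but only 28 along a diagonal, because diagonal neighbours are about sqrt(2) cell widths apart.

pixelwidth = 25.0        # hypothetical grid size in metres
terrain_buffer = 1000.0
cardinal_width = int(terrain_buffer / pixelwidth)            # 40
diagonal_width = int(terrain_buffer / (pixelwidth * 1.414))  # 28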
Example No. 7
def convo_combine(ms_orig, slope_array, aspect_array, tile_extents_nobuffer):
    """
    Apply convolution to the orginal shielding factor for each direction and
    call the :term:`combine` module to consider the slope and aspect and remove
    conservitism to get final shielding multiplier values

    :param ms_orig: `file` the original shidelding factor map
    :param slope_array: :class:`numpy.ndarray` the input slope values
    :param aspect_array: :class:`numpy.ndarray` the input aspect values
    :param tile_extents_nobuffer: `tuple` the input tile extent without buffer

    """

    ms_orig_ds = gdal.Open(ms_orig)
    if ms_orig_ds is None:
        log.info('Could not open ' + ms_orig)
        sys.exit(1)

    log.info('ms_orig is {0}'.format(ms_orig))

    # get image size, format, projection
    cols = ms_orig_ds.RasterXSize
    rows = ms_orig_ds.RasterYSize
    geotransform = ms_orig_ds.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(tile_extents_nobuffer, pixelwidth, pixelheight)

    band = ms_orig_ds.GetRasterBand(1)
    data = band.ReadAsArray(0, 0, cols, rows)

    x_m_array, y_m_array = get_pixel_size_grids(ms_orig_ds)
    gridwidth = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    log.info('gridwidth is {0}'.format(gridwidth))

    ms_folder = os.path.dirname(ms_orig)
    file_name = os.path.basename(ms_orig)

    dire = ['w', 'e', 'n', 's', 'nw', 'ne', 'se', 'sw']

    for one_dir in dire:

        log.info(one_dir)

        kernel_size = int(100.0 / gridwidth)

        log.info('convolution kernel size is {0}'.format(str(kernel_size)))

        # if the resolution is coarser than 100 m, skip the convolution and
        # just copy the initial shielding factor to each direction
        if kernel_size > 0:
            outdata = np.zeros((rows, cols), np.float32)

            kern_dir = globals()['kern_' + one_dir]
            mask = kern_dir(kernel_size)
            outdata = blur_image(data, mask)
        else:
            outdata = data

        result = combine(outdata, slope_array, aspect_array, one_dir)
        log.debug('Maximum shielding value is {0}'.format(result.max()))
        log.debug('Minimum shielding value is {0}'.format(result.min()))
        del outdata

        # output format as netCDF4
        tile_nc = pjoin(
            ms_folder,
            os.path.splitext(file_name)[0] +
            '_' +
            one_dir +
            '.nc')
        log.info("Saving shielding multiplier in netCDF file")

        result_nobuffer = clip_array(result, x_left, y_upper, pixelwidth,
                                     pixelheight, tile_extents_nobuffer)

        save_multiplier('Ms', result_nobuffer, lat, lon, tile_nc)

        del result

    ms_orig_ds = None
    try:
        os.remove(ms_orig)
    except OSError:
        pass
    os.chdir(ms_folder)
    filelist = glob.glob('*.xml')

    log.debug("useless xml files: {0}".format(repr(filelist)))

    if len(filelist) != 0:
        for f in filelist:
            try:
                os.remove(f)
            except OSError:
                pass
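save_multiplier is defined elsewhere in the package. A minimal sketch of what writing one multiplier grid to netCDF4 could look like with the lat/lon vectors computed above, using the netCDF4 library; this is an assumption, not the package's actual routine.

from netCDF4 import Dataset

def save_multiplier(name, data, lat, lon, filename):
    # write the grid plus its coordinate vectors to a new netCDF4 file
    with Dataset(filename, 'w', format='NETCDF4') as nc:
        nc.createDimension('lat', len(lat))
        nc.createDimension('lon', len(lon))
        nc.createVariable('lat', 'f8', ('lat',))[:] = lat
        nc.createVariable('lon', 'f8', ('lon',))[:] = lon
        var = nc.createVariable(name, 'f4', ('lat', 'lon'))
        var[:] = data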
Example No. 8
def topomult(input_dem):
    """
    Executes core topographic multiplier functionality

    :param input_dem: `file` the input tile of the DEM
    """

    # find output folder
    mh_folder = pjoin(os.path.dirname(input_dem), 'topographic')
    file_name = os.path.basename(input_dem)
    nc_folder = pjoin(mh_folder, 'netcdf')

    ds = gdal.Open(input_dem)
    nc = ds.RasterXSize
    nr = ds.RasterYSize

    geotransform = ds.GetGeoTransform()
    x_left = geotransform[0]
    y_upper = -geotransform[3]
    pixelwidth = geotransform[1]
    pixelheight = -geotransform[5]

    lon, lat = get_lat_lon(x_left, y_upper, pixelwidth, pixelheight, nc, nr)

    band = ds.GetRasterBand(1)
    elevation_array = band.ReadAsArray(0, 0, nc, nr)
    elevation_array[np.where(elevation_array < -0.001)] = np.nan

    elevation_array_tran = np.transpose(elevation_array)
    data = elevation_array_tran.flatten()

    x_m_array, y_m_array = get_pixel_size_grids(ds)
    cellsize = 0.5 * (np.mean(x_m_array) + np.mean(y_m_array))

    # Compute the starting positions along the boundaries depending on dir
    # Together, the direction and the starting position determines a line.
    # Note that the starting positions are defined
    # in terms of the 1-d index of the array.

    directions = ['n', 's', 'e', 'w', 'ne', 'nw', 'se', 'sw']

    for direction in directions:
        log.info(direction)

        if len(direction) == 2:
            data_spacing = cellsize * math.sqrt(2)
        else:
            data_spacing = cellsize

        mhdata = np.ones(data.shape)

        strt_idx = []
        if direction.find('n') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr * nc, nr)))
        if direction.find('s') >= 0:
            strt_idx = np.append(strt_idx, list(range(nr - 1, nr * nc, nr)))
        if direction.find('e') >= 0:
            strt_idx = np.append(strt_idx, list(range((nc - 1) * nr, nr * nc)))
        if direction.find('w') >= 0:
            strt_idx = np.append(strt_idx, list(range(0, nr)))

        # For the diagonal directions the corner will have been counted twice
        # so get rid of the duplicates then loop over the data lines
        # (i.e. over the starting positions)
        strt_idx = np.unique(strt_idx)

        for ctr, idx in enumerate(strt_idx):
            log.debug('Processing path %3i of %3i, index %5i.',
                      ctr, len(strt_idx), idx)

            # Get a line of the data
            # path is a 1-d vector which gives the indices of the data
            path = make_path.make_path(nr, nc, idx, direction)
            line = data[path]
            line[np.isnan(line)] = 0.
            m = multiplier_calc.multiplier_calc(line, data_spacing)

            # write the line back to the data array
            m = np.transpose(m)
            mhdata[path] = m[0, ].flatten()

        # Reshape the result to matrix like
        mhdata = np.reshape(mhdata, (nc, nr))
        mhdata = np.transpose(mhdata)

        # smooth
        g = np.ones((3, 3)) / 9.
        mhsmooth = signal.convolve(mhdata, g, mode='same')
        mhsmooth[np.isnan(elevation_array)] = np.nan
        del mhdata

        # output format as netCDF4
        tile_nc = pjoin(nc_folder, os.path.splitext(file_name)[0][:-4] + '_mt_' +
                        direction + '.nc')
        save_multiplier('Mt', mhsmooth, lat, lon, tile_nc)
        del mhsmooth

        log.info('Finished direction {0}'.format(direction))

    ds = None
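make_path is not shown in these examples. Under the same column-major indexing used above (flat index = column * nr + row), a hypothetical sketch of a west-to-east path starting from a west-edge index might look like the following; the real make_path also handles the other seven directions and may differ in detail.

import numpy as np

def make_path_west(nr, nc, idx):
    # hypothetical: from a west-edge start index, step one column east per cell
    row = idx % nr
    return np.arange(row, nr * nc, nr)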