Example #1
def ard_to_rgb(infile, outfile, driver='GTiff', to_db=True):

    prefix = glob.glob(os.path.abspath(infile[:-4]) + '*data')[0]

    if len(glob.glob(opj(prefix, '*VV*.img'))) == 1:
        co_pol = glob.glob(opj(prefix, '*VV*.img'))[0]

    if len(glob.glob(opj(prefix, '*VH*.img'))) == 1:
        cross_pol = glob.glob(opj(prefix, '*VH*.img'))[0]

    if len(glob.glob(opj(prefix, '*HH*.img'))) == 1:
        co_pol = glob.glob(opj(prefix, '*HH*.img'))[0]

    if len(glob.glob(opj(prefix, '*HV*.img'))) == 1:
        cross_pol = glob.glob(opj(prefix, '*HV*.img'))[0]

    # ensure that both polarisations exist
    with rasterio.open(co_pol) as co:

        # get meta data
        meta = co.meta

        # update meta
        meta.update(driver=driver, count=3, nodata=0)

        with rasterio.open(cross_pol) as cr:

            # ensure that the dimensions match
            with rasterio.open(outfile, 'w', **meta) as dst:

                if co.shape != cr.shape:
                    print('dimensions do not match')
                # loop through blocks
                for i, window in co.block_windows(1):

                    # read arrays and convert to dB (if not already)
                    co_array = co.read(window=window)
                    cr_array = cr.read(window=window)

                    if to_db:
                        # turn to db
                        co_array = ras.convert_to_db(co_array)
                        cr_array = ras.convert_to_db(cr_array)

                        # adjust for dB conversion
                        co_array[co_array == -130] = 0
                        cr_array[cr_array == -130] = 0

                    # turn 0s to nan
                    co_array[co_array == 0] = np.nan
                    cr_array[cr_array == 0] = np.nan

                    # create log ratio by subtracting the dbs
                    ratio_array = np.subtract(co_array, cr_array)

                    # write file
                    for k, arr in [(1, co_array), (2, cr_array),
                                   (3, ratio_array)]:
                        dst.write(arr[0, ], indexes=k, window=window)
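
A note on the helpers: the ras module used throughout these examples is not shown. Below is a minimal sketch of what convert_to_db and scale_to_int plausibly do, assuming linear power input with a small floor that keeps zeros finite (10 * log10(1e-13) == -130, the sentinel masked out above) and a simple linear stretch onto the integer range; the project's real implementations may differ.

import numpy as np


def convert_to_db(pow_array):
    # 10 * log10 of the linear power values; the floor keeps zeros finite
    # and is what produces the -130 dB sentinel handled by the callers
    return 10 * np.log10(pow_array.clip(min=1e-13))


def scale_to_int(float_array, minimum, maximum, datatype='uint8'):
    # linearly stretch [minimum, maximum] onto the integer range of
    # datatype, reserving 0 for nodata
    info = np.iinfo(datatype)
    clipped = np.clip(float_array, minimum, maximum)
    scaled = (clipped - minimum) / (maximum - minimum)
    return np.nan_to_num(scaled * (info.max - 1) + 1).astype(datatype)
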
def ard_to_thumbnail(infile,
                     outfile,
                     driver='JPEG',
                     shrink_factor=25,
                     to_db=True):
    prefix = glob.glob(os.path.abspath(infile[:-4]) + '*data')[0]

    if len(glob.glob(opj(prefix, '*VV*.img'))) == 1:
        co_pol = glob.glob(opj(prefix, '*VV*.img'))[0]

    if len(glob.glob(opj(prefix, '*VH*.img'))) == 1:
        cross_pol = glob.glob(opj(prefix, '*VH*.img'))[0]

    if len(glob.glob(opj(prefix, '*HH*.img'))) == 1:
        co_pol = glob.glob(opj(prefix, '*HH*.img'))[0]

    if len(glob.glob(opj(prefix, '*HV*.img'))) == 1:
        cross_pol = glob.glob(opj(prefix, '*HV*.img'))[0]

    # ensure that both polarisations exist
    with rasterio.open(co_pol) as co, rasterio.open(cross_pol) as cr:
        # get meta data
        meta = co.meta
        # update meta
        meta.update(driver=driver, count=3, dtype='uint8')
        # ensure that the dimensions match
        new_height = int(co.height / shrink_factor)
        new_width = int(co.width / shrink_factor)
        out_shape = (co.count, new_height, new_width)

        meta.update(height=new_height, width=new_width)

        if co.shape != cr.shape:
            logger.debug('dimensions do not match')

        # read downsampled arrays and convert to dB
        # (resampling=5 is rasterio's Resampling.average)
        co_array = co.read(out_shape=out_shape, resampling=5)
        cr_array = cr.read(out_shape=out_shape, resampling=5)

        if to_db:
            co_array = ras.convert_to_db(co_array)
            cr_array = ras.convert_to_db(cr_array)

        co_array[co_array == 0] = np.nan
        cr_array[cr_array == 0] = np.nan

        # create log ratio
        ratio_array = np.subtract(co_array, cr_array)

        r = ras.scale_to_int(co_array, -20, 0, 'uint8')
        g = ras.scale_to_int(cr_array, -25, -5, 'uint8')
        b = ras.scale_to_int(ratio_array, 1, 15, 'uint8')

        with rasterio.open(outfile, 'w', **meta) as dst:
            for k, arr in [(1, r), (2, g), (3, b)]:
                dst.write(arr[0, ], indexes=k)
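
A hypothetical call of ard_to_thumbnail, assuming a processed BEAM-DIMAP product scene.dim sitting next to its scene.data directory (all paths are illustrative only):

ard_to_thumbnail('/data/ard/scene.dim',
                 '/data/ard/scene.jpg',
                 driver='JPEG',
                 shrink_factor=25,
                 to_db=True)
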
def execute_burst_to_tif(dim_file, out_path, driver='GTiff', to_db=False):
    i, dim_file = dim_file
    out_tif = opj(out_path, str(i) + '.tif')
    prefix = glob.glob(os.path.abspath(dim_file[:-4]) + '*data')[0]
    if len(glob.glob(opj(prefix, '*VV*.img'))) == 1:
        co_pol = glob.glob(opj(prefix, '*VV*.img'))[0]
    if len(glob.glob(opj(prefix, '*VH*.img'))) == 1:
        cross_pol = glob.glob(opj(prefix, '*VH*.img'))[0]
    if len(glob.glob(opj(prefix, '*HH*.img'))) == 1:
        co_pol = glob.glob(opj(prefix, '*HH*.img'))[0]

    if len(glob.glob(opj(prefix, '*HV*.img'))) == 1:
        cross_pol = glob.glob(opj(prefix, '*HV*.img'))[0]
    with rasterio.open(co_pol) as co, rasterio.open(cross_pol) as cr:
        out_profile = co.meta
        out_profile.update(driver=driver,
                           count=3,
                           nodata=0.0,
                           compress='Deflate',
                           dtype='float32')
        with rasterio.open(out_tif, 'w', **out_profile) as dst:
            if co.shape != cr.shape:
                logger.debug('dimensions do not match')
            # read arrays and convert to dB (if not already)
            co_array = co.read(resampling=Resampling.cubic_spline).astype(
                np.float32)
            cr_array = cr.read(resampling=Resampling.cubic_spline).astype(
                np.float32)
            if to_db:
                # turn to db
                co_array = ras.convert_to_db(co_array)
                cr_array = ras.convert_to_db(cr_array)
                # adjust for dB conversion
                co_array[co_array == -130] = 0
                cr_array[cr_array == -130] = 0
            # keep zeros as the nodata value (output nodata is 0)
            co_array[co_array == 0] = 0.
            cr_array[cr_array == 0] = 0.

            border_mask = ras.np_binary_erosion(co_array).astype(bool)

            co_array = np.where(border_mask, co_array, 0)
            cr_array = np.where(border_mask, cr_array, 0)

            # create log ratio by subtracting the dbs
            ratio_array = np.subtract(co_array, cr_array)
            # write file
            for k, arr in [(1, co_array), (2, cr_array), (3, ratio_array)]:
                dst.write(arr[0, ], indexes=k)
    return out_tif
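
np_binary_erosion is used above to trim noisy border pixels before writing. A rough stand-in built on scipy.ndimage.binary_erosion, assuming the helper erodes the valid-data mask of the single input band; the project's own version may differ.

import numpy as np
from scipy.ndimage import binary_erosion


def np_binary_erosion(array, iterations=10):
    # build a valid-data mask from the first band and erode it so that
    # pixels along the image/nodata border are dropped by the caller
    mask = array[0] != 0
    return binary_erosion(mask, iterations=iterations)[np.newaxis, :, :]
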
Example #4
def mt_metrics(stack,
               out_prefix,
               metrics,
               rescale_to_datatype=False,
               to_power=False,
               outlier_removal=False):

    with rasterio.open(stack) as src:

        # get metadata
        meta = src.meta

        # update driver and reduced band count
        meta.update({'driver': 'GTiff'})
        meta.update({'count': 1})

        # write all different output files into a dictionary
        metric_dict = {}
        for metric in metrics:
            metric_dict[metric] = rasterio.open(
                out_prefix + '.' + metric + '.tif', 'w', **meta)

        # scaling factors in case we have to rescale to integer
        minimums = {
            'avg': -30,
            'max': -30,
            'min': -30,
            'std': 0.00001,
            'cov': 0.00001
        }
        maximums = {'avg': 5, 'max': 5, 'min': 5, 'std': 15, 'cov': 1}

        # loop through blocks
        for _, window in src.block_windows(1):

            # read array with all bands
            stack = src.read(range(1, src.count + 1), window=window)

            if rescale_to_datatype is True and meta['dtype'] != 'float32':
                stack = ras.rescale_to_float(stack, meta['dtype'])

            # transform to power
            if to_power is True:
                stack = ras.convert_to_power(stack)

            # outlier removal (only applies if there are more than 5 bands)
            if outlier_removal is True and src.count >= 5:
                stack = remove_outliers(stack)

            # get stats
            arr = {}
            arr['avg'] = (np.nan_to_num(np.nanmean(stack, axis=0))
                          if 'avg' in metrics else False)
            arr['max'] = (np.nan_to_num(np.nanmax(stack, axis=0))
                          if 'max' in metrics else False)
            arr['min'] = (np.nan_to_num(np.nanmin(stack, axis=0))
                          if 'min' in metrics else False)
            arr['std'] = (np.nan_to_num(np.nanstd(stack, axis=0))
                          if 'std' in metrics else False)
            arr['cov'] = (np.nan_to_num(
                stats.variation(stack, axis=0, nan_policy='omit'))
                          if 'cov' in metrics else False)

            # the metrics to be converted back to dB in case to_power is True
            metrics_to_convert = ['avg', 'min', 'max']

            # do the back conversions and write to disk loop
            for metric in metrics:

                if to_power is True and metric in metrics_to_convert:
                    arr[metric] = ras.convert_to_db(arr[metric])

                if rescale_to_datatype is True and meta['dtype'] != 'float32':
                    arr[metric] = ras.scale_to_int(arr[metric],
                                                   minimums[metric],
                                                   maximums[metric],
                                                   meta['dtype'])

                # write to dest
                metric_dict[metric].write(np.float32(arr[metric]),
                                          window=window,
                                          indexes=1)

    # close the output files
    for metric in metrics:
        metric_dict[metric].close()
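
remove_outliers is called above but not defined in these examples. One plausible implementation masks, per pixel, any acquisition that deviates strongly from the temporal mean; a hypothetical sketch, not the project's exact logic.

import numpy as np


def remove_outliers(stack, stddev_factor=3):
    # per-pixel temporal mean and standard deviation; observations beyond
    # the threshold become NaN so the nan-aware metrics ignore them
    mean = np.nanmean(stack, axis=0)
    std = np.nanstd(stack, axis=0)
    outliers = np.abs(stack - mean) > stddev_factor * std
    return np.where(outliers, np.nan, stack)
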
def mt_metrics(stack,
               out_prefix,
               metrics,
               rescale_to_datatype=False,
               to_power=False,
               outlier_removal=False,
               datelist=None):
    # from datetime import datetime
    with rasterio.open(stack) as src:
        harmonics = False
        if 'harmonics' in metrics:
            logger.info('Calculating harmonics')
            if not datelist:
                logger.warning('Harmonics need the datelist. '
                               'Harmonics will not be calculated')
            else:
                harmonics = True
                metrics.remove('harmonics')
                metrics.extend(['amplitude', 'phase', 'residuals'])

        if 'percentiles' in metrics:
            metrics.remove('percentiles')
            metrics.extend(['p95', 'p5'])

        # get metadata
        meta = src.profile

        # update driver and reduced band count
        meta.update({'driver': 'GTiff'})
        meta.update({'count': 1})

        # write all different output files into a dictionary
        metric_dict = {}
        for metric in metrics:
            filename = '{}.{}.tif'.format(out_prefix, metric)
            metric_dict[metric] = rasterio.open(filename, 'w', **meta)

        # scaling factors in case we have to rescale to integer
        minimums = {
            'avg': -30,
            'max': -30,
            'min': -30,
            'std': 0.00001,
            'cov': 0.00001,
            'count': 0
        }
        maximums = {
            'avg': 5,
            'max': 5,
            'min': 5,
            'std': 15,
            'cov': 1,
            'count': 64000
        }

        if harmonics:
            # construct independent variables
            dates, sines, cosines = [], [], []
            two_pi = np.multiply(2, np.pi)
            for date in sorted(datelist):

                delta = difference_in_years(
                    datetime.strptime('700101', "%y%m%d"),
                    datetime.strptime(date, "%y%m%d"))
                dates.append(delta)
                sines.append(np.sin(np.multiply(two_pi, delta - 0.5)))
                cosines.append(np.cos(np.multiply(two_pi, delta - 0.5)))

            x_array = np.array([dates, cosines, sines])

        # loop through blocks
        for _, window in src.block_windows(1):
            # read array with all bands
            stack = src.read(range(1, src.count + 1), window=window)

            if rescale_to_datatype is True and meta['dtype'] != 'float32':
                stack = ras.rescale_to_float(stack, meta['dtype'])

            # transform to power
            if to_power is True:
                stack = ras.convert_to_power(stack)

            # outlier removal (only applies if there are more than 5 bands)
            if outlier_removal is True and src.count >= 5:
                stack = remove_outliers(stack)

            # get stats
            np.seterr(divide='ignore', invalid='ignore')
            arr = {}
            arr['p95'], arr['p5'] = (np.nan_to_num(
                nan_percentile(stack, [95, 5])) if 'p95' in metrics else
                                     (False, False))
            arr['median'] = (np.nan_to_num(np.nanmedian(stack, axis=0))
                             if 'median' in metrics else False)
            arr['avg'] = (np.nan_to_num(np.nanmean(stack, axis=0))
                          if 'avg' in metrics else False)
            arr['max'] = (np.nan_to_num(np.nanmax(stack, axis=0))
                          if 'max' in metrics else False)
            arr['min'] = (np.nan_to_num(np.nanmin(stack, axis=0))
                          if 'min' in metrics else False)
            arr['std'] = (np.nan_to_num(np.nanstd(stack, axis=0))
                          if 'std' in metrics else False)
            arr['cov'] = (np.nan_to_num(
                stats.variation(stack, axis=0, nan_policy='omit'))
                          if 'cov' in metrics else False)
            arr['count'] = (np.nan_to_num(np.count_nonzero(stack, axis=0))
                            if 'count' in metrics else False)

            if harmonics:
                stack_size = (stack.shape[1], stack.shape[2])
                if to_power is True:
                    y = ras.convert_to_db(stack).reshape(stack.shape[0], -1)
                else:
                    y = stack.reshape(stack.shape[0], -1)

                coefs, residuals, _, _ = np.linalg.lstsq(x_array.T, y,
                                                         rcond=-1)
                arr['amplitude'] = np.hypot(coefs[1],
                                            coefs[2]).reshape(stack_size)
                arr['phase'] = np.arctan2(coefs[2],
                                          coefs[1]).reshape(stack_size)
                arr['residuals'] = np.sqrt(np.divide(
                    residuals, stack.shape[0])).reshape(stack_size)

            # the metrics to be converted back to dB in case to_power is True
            metrics_to_convert = ['avg', 'min', 'max', 'p95', 'p5', 'median']

            # do the back conversions and write to disk loop
            for metric in metrics:
                if to_power is True and metric in metrics_to_convert:
                    arr[metric] = ras.convert_to_db(arr[metric])

                if rescale_to_datatype is True and meta['dtype'] != 'float32':
                    arr[metric] = ras.scale_to_int(arr[metric],
                                                   minimums[metric],
                                                   maximums[metric],
                                                   meta['dtype'])
                # write to dest
                metric_dict[metric].write(np.float32(arr[metric]),
                                          window=window,
                                          indexes=1)
                metric_dict[metric].update_tags(
                    1,
                    BAND_NAME='{}_{}'.format(os.path.basename(out_prefix),
                                             metric))
                metric_dict[metric].set_band_description(
                    1, '{}_{}'.format(os.path.basename(out_prefix), metric))

    # close the output files
    for metric in metrics:
        # close rio opening
        metric_dict[metric].close()

    dirname = os.path.dirname(out_prefix)
    check_file = opj(dirname,
                     '.{}.processed'.format(os.path.basename(out_prefix)))
    with open(str(check_file), 'w') as file:
        file.write('passed all tests \n')
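
Two further helpers appear in the harmonic and percentile branches: difference_in_years and nan_percentile. Minimal stand-ins that are consistent with how they are called above (assumptions, not the original code):

import numpy as np


def difference_in_years(start, end):
    # fractional years between two datetimes; used as the time axis of
    # the harmonic model
    return (end - start).days / 365.25


def nan_percentile(stack, quantiles):
    # per-pixel percentiles along the time axis, ignoring NaNs;
    # returns one 2D array per requested quantile
    return np.nanpercentile(stack, quantiles, axis=0)
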
Example #6
def mt_metrics(stack, out_prefix, metrics, rescale_to_datatype, to_power,
               outlier_removal, datelist):
    """

    :param stack:
    :param out_prefix:
    :param metrics:
    :param rescale_to_datatype:
    :param to_power:
    :param outlier_removal:
    :param datelist:
    :return:
    """

    logger.info(f'Creating timescan layers ({metrics}) of track/burst '
                f'{out_prefix.parent.parent.name} for {out_prefix.name}')

    warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
    warnings.filterwarnings('ignore', r'Mean of empty slice')
    warnings.filterwarnings('ignore', r'Degrees of freedom', RuntimeWarning)

    harmonics = False
    if 'harmonics' in metrics:
        logger.info('Calculating harmonics')
        if not datelist:
            raise RuntimeWarning('Harmonics need the datelist. '
                                 'Harmonics will not be calculated')
        else:
            harmonics = True
            metrics.remove('harmonics')
            metrics.extend(
                ['amplitude', 'phase', 'residuals', 'trend', 'model_mean'])

    if 'percentiles' in metrics:
        metrics.remove('percentiles')
        metrics.extend(['p95', 'p5'])

    with rasterio.open(stack) as src:

        # get metadata
        meta = src.profile

        # update driver and reduced band count
        meta.update({'driver': 'GTiff'})
        meta.update({'count': 1})

        # write all different output files into a dictionary
        metric_dict = {}
        for metric in metrics:
            filename = f'{out_prefix}.{metric}.tif'
            metric_dict[metric] = rasterio.open(filename, 'w', **meta)

        # scaling factors in case we have to rescale to integer
        minimums = {
            'avg': -30,
            'max': -30,
            'min': -30,
            'median': -30,
            'p5': -30,
            'p95': -30,
            'std': 0.00001,
            'cov': 0.00001,
            'amplitude': -5,
            'phase': -np.pi,
            'residuals': -10,
            'trend': -5,
            'model_mean': -30
        }

        maximums = {
            'avg': 5,
            'max': 5,
            'min': 5,
            'median': 5,
            'p5': 5,
            'p95': 5,
            'std': 0.2,
            'cov': 1,
            'amplitude': 5,
            'phase': np.pi,
            'residuals': 10,
            'trend': 5,
            'model_mean': 5
        }

        if 'amplitude' in metrics:
            # construct independent variables
            dates, sines, cosines, intercept = [], [], [], []
            two_pi = np.multiply(2, np.pi)

            for date in sorted(datelist):
                delta = difference_in_years(
                    datetime.strptime('700101', "%y%m%d"),
                    datetime.strptime(date, "%y%m%d"))
                dates.append(delta)
                sines.append(np.sin(np.multiply(two_pi, delta)))
                cosines.append(np.cos(np.multiply(two_pi, delta)))
                intercept.append(1)

            x_array = np.array([dates, cosines, sines, intercept])

        # loop through blocks
        for _, window in src.block_windows(1):

            # read array with all bands
            stack = src.read(range(1, src.count + 1), window=window)

            # rescale to float
            if rescale_to_datatype is True and meta['dtype'] != 'float32':
                stack = ras.rescale_to_float(stack, meta['dtype'])

            # transform to power
            if to_power is True:
                stack = np.power(10, np.divide(stack, 10))

            # outlier removal (only applies if there are more than 5 bands)
            if outlier_removal is True and src.count >= 5:
                stack = remove_outliers(stack)

            # get stats (compute both percentiles in one pass)
            p95, p5 = (nan_percentile(stack, [95, 5])
                       if 'p95' in metrics else (False, False))

            arr = {
                'p95': p95,
                'p5': p5,
                'median': (np.nanmedian(stack, axis=0)
                           if 'median' in metrics else False),
                'avg':
                (np.nanmean(stack, axis=0) if 'avg' in metrics else False),
                'max':
                (np.nanmax(stack, axis=0) if 'max' in metrics else False),
                'min':
                (np.nanmin(stack, axis=0) if 'min' in metrics else False),
                'std':
                (np.nanstd(stack, axis=0) if 'std' in metrics else False),
                #'cov': (stats.variation(stack, axis=0, nan_policy='omit')
                'cov':
                (np.divide(np.nanstd(stack, axis=0), np.nanmean(stack, axis=0))
                 if 'cov' in metrics else False)
            }

            if 'amplitude' in metrics:

                stack_size = (stack.shape[1], stack.shape[2])
                if to_power is True:
                    y = ras.convert_to_db(stack).reshape(stack.shape[0], -1)
                else:
                    y = stack.reshape(stack.shape[0], -1)

                x, residuals, _, _ = np.linalg.lstsq(x_array.T, y, rcond=-1)
                arr['amplitude'] = np.hypot(x[1], x[2]).reshape(stack_size)
                arr['phase'] = np.arctan2(x[2], x[1]).reshape(stack_size)
                arr['trend'] = x[0].reshape(stack_size)
                arr['model_mean'] = x[3].reshape(stack_size)
                arr['residuals'] = np.sqrt(np.divide(
                    residuals, stack.shape[0])).reshape(stack_size)

            # the metrics to be converted back to dB in case to_power is True
            metrics_to_convert = ['avg', 'min', 'max', 'p95', 'p5', 'median']

            # do the back conversions and write to disk loop
            for metric in metrics:

                if to_power is True and metric in metrics_to_convert:
                    arr[metric] = ras.convert_to_db(arr[metric])

                if ((rescale_to_datatype is True
                     and meta['dtype'] != 'float32')
                        or (metric in ['cov', 'phase']
                            and meta['dtype'] != 'float32')):
                    arr[metric] = ras.scale_to_int(arr[metric],
                                                   minimums[metric],
                                                   maximums[metric],
                                                   meta['dtype'])

                # write to dest
                metric_dict[metric].write(np.nan_to_num(arr[metric]).astype(
                    meta['dtype']),
                                          window=window,
                                          indexes=1)
                metric_dict[metric].update_tags(
                    1, BAND_NAME=f'{Path(out_prefix).name}_{metric}')
                metric_dict[metric].set_band_description(
                    1, f'{Path(out_prefix).name}_{metric}')

    # close the output files
    for metric in metrics:
        # close rio opening
        metric_dict[metric].close()

        # construct filename
        filename = f'{str(out_prefix)}.{metric}.tif'
        return_code = h.check_out_tiff(filename)

        if return_code != 0:

            for metric_ in metrics:
                # remove all files and return
                filename = f'{str(out_prefix)}.{metric_}.tif'
                Path(filename).unlink()
                if Path(f'{filename}.xml').exists():
                    Path(f'{filename}.xml').unlink()

            return None, None, None, return_code

    # write out that it's been processed
    dirname = out_prefix.parent
    check_file = dirname.joinpath(f'.{out_prefix.name}.processed')
    with open(str(check_file), 'w') as file:
        file.write('passed all tests \n')

    target = out_prefix.parent.parent.name
    return target, out_prefix.name, metrics, None
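
A hypothetical invocation of this last variant, assuming a multi-band time-series GeoTIFF and acquisition dates in 'yymmdd' format (paths, filenames and dates are illustrative only):

from pathlib import Path

target, name, written_metrics, error = mt_metrics(
    stack='/data/timeseries/Timeseries.VV.tif',
    out_prefix=Path('/data/timescan/Timescan.VV'),
    metrics=['avg', 'std', 'percentiles', 'harmonics'],
    rescale_to_datatype=False,
    to_power=True,
    outlier_removal=True,
    datelist=['200105', '200117', '200129', '200210'])
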
Example #7
def ard_to_rgb(infile, outfile, driver='GTiff', to_db=True, shrink_factor=1):
    if infile.suffix != '.dim':
        raise TypeError('File needs to be in BEAM-DIMAP format')

    data_dir = infile.with_suffix('.data')

    if list(data_dir.glob('*VV.img')):
        co_pol = list(data_dir.glob('*VV*.img'))[0]
    elif list(data_dir.glob('*HH.img')):
        co_pol = list(data_dir.glob('*HH*.img'))[0]
    else:
        raise RuntimeError('No co-polarised band found.')

    if list(data_dir.glob('*VH.img')):
        cross_pol = list(data_dir.glob('*VH*.img'))[0]
    elif list(data_dir.glob('*HV.img')):
        cross_pol = list(data_dir.glob('*HV*.img'))[0]
    else:
        cross_pol = Path('/no/foo/no')

    if cross_pol.exists():

        with rasterio.open(co_pol) as co:

            # get meta data
            meta = co.meta

            # update meta
            meta.update(driver=driver, count=3, nodata=0)

            with rasterio.open(cross_pol) as cr:

                if co.shape != cr.shape:
                    raise RuntimeError(
                        'Dimensions of co- and cross-polarised bands '
                        'do not match')

                new_height = int(co.height / shrink_factor)
                new_width = int(co.width / shrink_factor)
                out_shape = (co.count, new_height, new_width)

                meta.update(height=new_height, width=new_width)

                co_array = co.read(out_shape=out_shape, resampling=5)
                cr_array = cr.read(out_shape=out_shape, resampling=5)

                # turn 0s to nan
                co_array[co_array == 0] = np.nan
                cr_array[cr_array == 0] = np.nan

                # create the ratio by dividing the linear power values
                ratio_array = np.divide(co_array, cr_array)

                if to_db:
                    # turn to db
                    co_array = ras.convert_to_db(co_array)
                    cr_array = ras.convert_to_db(cr_array)

                if driver == 'JPEG':
                    co_array = ras.scale_to_int(co_array, -20, 0, 'uint8')
                    cr_array = ras.scale_to_int(cr_array, -25, -5, 'uint8')
                    ratio_array = ras.scale_to_int(ratio_array, 1, 15, 'uint8')
                    meta.update(dtype='uint8')

                with rasterio.open(outfile, 'w', **meta) as dst:

                    # write file
                    for k, arr in [
                        (1, co_array), (2, cr_array), (3, ratio_array)
                    ]:
                        dst.write(arr[0, ], indexes=k)

    # greyscale
    else:
        logger.info(
            'No cross-polarised band found. Creating 1-band greyscale '
            'image.')

        with rasterio.open(co_pol) as co:

            # get meta data
            meta = co.meta

            # update meta
            meta.update(driver=driver, count=1, nodata=0)

            new_height = int(co.height / shrink_factor)
            new_width = int(co.width / shrink_factor)
            out_shape = (co.count, new_height, new_width)

            meta.update(height=new_height, width=new_width)

            co_array = co.read(out_shape=out_shape, resampling=5)

            if to_db:
                # turn to db
                co_array = ras.convert_to_db(co_array)

            if driver == 'JPEG':
                co_array = ras.scale_to_int(co_array, -20, 0, 'uint8')
                meta.update(dtype='uint8')

            with rasterio.open(outfile, 'w', **meta) as dst:
                dst.write(co_array)
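
As with the earlier variants, a short hypothetical call; the input must be a BEAM-DIMAP .dim product and the paths are illustrative only:

from pathlib import Path

ard_to_rgb(infile=Path('/data/ard/scene.dim'),
           outfile=Path('/data/ard/scene_rgb.tif'),
           driver='GTiff',
           to_db=True,
           shrink_factor=1)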