def to_python(self, value):
    """Deserialize a JSON renderer definition into a RasterRenderer instance.

    Accepts either an existing RasterRenderer (passed through unchanged), an
    empty value (passed through), or a JSON string of the form::

        {"name": "stretched"|"classified"|"unique",
         "colormap": [[value, [r, g, b, a]], ...],
         "params": {...renderer-specific options...}}

    :raises ValidationError: if the JSON is malformed, required keys are
        missing, color tuples are invalid, or the renderer name is unknown.
    """
    if not value or isinstance(value, RasterRenderer):
        return value

    try:
        data = json.loads(value)
        name = data['name']
        params = data.get('params', {})

        kwargs = {
            'colormap': [(c[0], Color(*c[1])) for c in data['colormap']],
            'fill_value': params.get('fill_value'),
            'background_color': (
                Color(*params['background_color'])
                if params.get('background_color') else None
            )
        }

        if name == "stretched":
            cls = StretchedRenderer
            kwargs.update({
                'method': params.get('method', 'linear'),
                'colorspace': params.get('colorspace', 'hsv')
            })
        elif name == "classified":
            cls = ClassifiedRenderer
        elif name == "unique":
            cls = UniqueValuesRenderer
            kwargs.update({'labels': params.get('labels')})
        else:
            # BUG FIX: an unrecognized name previously fell through and raised
            # UnboundLocalError on `cls` below, outside the except clause.
            raise ValidationError("Invalid renderer name: {}".format(name))
    # TypeError added: Color(*...) raises it for malformed color tuples.
    except (ValueError, KeyError, TypeError):
        raise ValidationError("Invalid renderer configuration")

    return cls(**kwargs)
def test_classified_rendererer(tmpdir):
    """Classified renderer: name, rendered palette/size, legend, serialization."""
    data = numpy.zeros((100, 100))
    data += numpy.arange(100).reshape(-1, 1)  # row i holds the value i

    colors = (
        (10, Color(255, 0, 0, 255)),
        (50, Color(0, 255, 0, 255)),
        (data.max(), Color(0, 0, 255, 255)),
    )
    renderer = ClassifiedRenderer(colors)
    assert renderer.name == 'classified'

    img = renderer.render_image(data)
    img.save(str(tmpdir.join("classified.png")))
    assert img.palette.palette == b'\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00'
    assert img.size == (100, 100)

    legend = renderer.get_legend(20, 20)
    assert len(legend) == 3
    for index, element in enumerate(legend):
        element.image.save(str(tmpdir.join("classified_legend_%i.png" % index)))

    expected = {
        'colors': [(10, '#F00'), (50, '#0F0'), (99.0, '#00F')],
        'type': 'classified'
    }
    assert renderer.serialize() == expected
def test_stretched_renderer(tmpdir):
    """Stretched renderer: 256-color palette, legends (plain and discrete), serialization."""
    data = numpy.zeros((100, 100))
    data += numpy.arange(100).reshape(-1, 1)  # row i holds the value i

    colors = (
        (data.min(), Color(255, 0, 0, 255)),
        (data.max(), Color(0, 0, 255, 255)),
    )
    renderer = StretchedRenderer(colors)
    assert renderer.name == 'stretched'

    img = renderer.render_image(data)
    assert len(img.getpalette()) / 3 == 256
    assert img.size == (100, 100)
    img.save(str(tmpdir.join("stretched.png")))

    # Continuous legend: a single gradient element.
    legend = renderer.get_legend(20, 20)
    assert len(legend) == 1
    assert legend[0].image.size == (20, 20)
    legend[0].image.save(str(tmpdir.join("stretched_legend.png")))

    # Discrete legend: one element per colormap entry.
    legend = renderer.get_legend(20, 20, discrete_images=True)
    assert len(legend) == 2
    assert legend[0].image.size == (20, 20)

    expected = {
        'colors': [(0.0, '#F00'), (99.0, '#00F')],
        'type': 'stretched',
        'options': {'color_space': 'hsv'}
    }
    assert renderer.serialize() == expected
def test_uniquevalues_renderer(tmpdir):
    """Unique-values renderer: palette, size, per-class legend, serialization."""
    data = numpy.zeros((100, 100))
    for value, rows in ((10, slice(10, 25)),
                        (25, slice(35, 50)),
                        (50, slice(50, 75)),
                        (100, slice(85, 100))):
        data[rows] = value

    colors = (
        (10, Color(255, 0, 0, 255)),
        (25, Color(255, 255, 255, 255)),
        (50, Color(0, 255, 0, 255)),
        (100, Color(0, 0, 255, 255)),
    )
    labels = ('A', 'B', 'C', 'D')
    renderer = UniqueValuesRenderer(colors, labels=labels)
    assert renderer.name == 'unique'

    img = renderer.render_image(data)
    img.save(str(tmpdir.join("unique.png")))
    assert img.palette.palette == b'\xff\x00\x00\xff\xff\xff\x00\xff\x00\x00\x00\xff\x00\x00\x00'
    assert img.size == (100, 100)

    legend = renderer.get_legend(20, 20)
    assert len(legend) == 4
    for index, element in enumerate(legend):
        element.image.save(
            str(tmpdir.join("uniquevalues_legend_%i.png" % index)))

    expected = {
        'colors': [(10, '#F00'), (25, '#FFF'), (50, '#0F0'), (100, '#00F')],
        'type': 'unique',
        'options': {'labels': ('A', 'B', 'C', 'D')}
    }
    assert renderer.serialize() == expected
def palette_to_stretched_renderer(palette_path, values, filenames=None, variable=None, fill_value=None, mask=None):
    """Build a StretchedRenderer from a palettable palette and break values.

    :param palette_path: palette identifier, resolved via ``get_palette``
    :param values: comma-delimited break values; 'min' / 'max' are replaced
        with statistics collected from the datasets
    :param filenames: datasets for statistics (required when 'min'/'max' used)
    :param variable: variable for statistics (required when 'min'/'max' used)
    :param fill_value: optional fill value passed to the renderer
    :param mask: optional mask applied when collecting statistics
    :raises ValueError: for fewer than 2 values, or when statistics are needed
        but filenames/variable were not both provided
    """
    palette = get_palette(palette_path)

    values = values.split(',')
    if not len(values) > 1:
        raise ValueError('Must provide at least 2 values for palette-based stretched renderer')

    if 'min' in values or 'max' in values:
        # BUG FIX: was `if not filenames and variable:`, which (due to operator
        # precedence) only raised when filenames was missing AND variable was
        # present. Both inputs are required to collect statistics.
        if not (filenames and variable):
            raise ValueError('filenames and variable are required inputs to use palette with statistics')

        statistics = collect_statistics(filenames, (variable,), mask=mask)[variable]
        for statistic in ('min', 'max'):
            if statistic in values:
                values[values.index(statistic)] = statistics[statistic]

    values = [float(v) for v in values]  # in case any are still strings

    hex_colors = palette.hex_colors

    # TODO: this only works cleanly for min:max or 2 endpoint values. Otherwise
    # require that the number of palette colors match the number of values
    colors = [(values[0], Color.from_hex(hex_colors[0]))]
    intermediate_colors = hex_colors[1:-1]
    if intermediate_colors:
        # Spread the interior palette colors evenly between the two endpoints.
        interval = (values[-1] - values[0]) / (len(intermediate_colors) + 1)
        for i, color in enumerate(intermediate_colors):
            colors.append((values[0] + (i + 1) * interval, Color.from_hex(color)))
    colors.append((values[-1], Color.from_hex(hex_colors[-1])))

    return StretchedRenderer(colors, colorspace='rgb', fill_value=fill_value)  # I think all palettable palettes are in RGB ramps
def _palette_to_stretched_renderer(palette_path, values, filenames=None, variable=None):
    """Build a StretchedRenderer from a dotted palettable path and break values.

    :param palette_path: dotted path below ``palettable`` (e.g. 'colorbrewer.sequential.Blues_9')
    :param values: comma-delimited break values; 'min' / 'max' are replaced
        with statistics collected from the datasets
    :param filenames: datasets for statistics (required when 'min'/'max' used)
    :param variable: variable for statistics (required when 'min'/'max' used)
    :raises ValueError: for fewer than 2 values, or when statistics are needed
        but filenames/variable were not both provided
    """
    index = palette_path.rindex('.')
    palette = getattr(importlib.import_module('palettable.' + palette_path[:index]), palette_path[index + 1:])

    values = values.split(',')
    if not len(values) > 1:
        raise ValueError('Must provide at least 2 values for palette-based stretched renderer')

    if 'min' in values or 'max' in values:
        # BUG FIX: was `if not filenames and variable:` — operator precedence
        # meant the error only fired when filenames was missing AND variable
        # was present. Both inputs are required.
        if not (filenames and variable):
            raise ValueError('filenames and variable are required inputs to use palette with statistics')

        statistics = collect_statistics(filenames, (variable,))[variable]
        for statistic in ('min', 'max'):
            if statistic in values:
                values[values.index(statistic)] = statistics[statistic]

    # BUG FIX: values split from the input string remain strings unless they
    # were replaced by statistics; convert so the interval arithmetic and the
    # renderer receive numbers (matches the public sibling function).
    values = [float(v) for v in values]

    hex_colors = palette.hex_colors

    # TODO: this only works cleanly for min:max or 2 endpoint values. Otherwise
    # require that the number of palette colors match the number of values
    colors = [(values[0], Color.from_hex(hex_colors[0]))]
    intermediate_colors = hex_colors[1:-1]
    if intermediate_colors:
        interval = (values[-1] - values[0]) / (len(intermediate_colors) + 1)
        for i, color in enumerate(intermediate_colors):
            colors.append((values[0] + (i + 1) * interval, Color.from_hex(color)))
    colors.append((values[-1], Color.from_hex(hex_colors[-1])))

    return StretchedRenderer(colors, colorspace='rgb')  # I think all palettable palettes are in RGB ramps
def get_results_image(self, bounds, size, single_color, kept_colors, gained_colors, species, historic, futures):
    """Render a presence/absence comparison image for a species.

    Renders historic presence alone (single_color) when no futures are given;
    otherwise classifies each cell as "kept" (present historically and in
    futures) or "gained" (absent historically, present in futures), colored by
    how many future scenarios agree.

    :param bounds: WGS84 bounding box for the output image
    :param size: output image size in pixels
    :param single_color: hex color used when no futures are requested
    :param kept_colors: base hex colors for the "kept" ramp
    :param gained_colors: base hex colors for the "gained" ramp
    :param species: species code used to look up services
    :param historic: historic period identifier used in the service name
    :param futures: iterable of future scenario identifiers (may be empty)
    """
    # One color per number-of-agreeing-futures, plus one for "historic only".
    kept_colors = self.get_colors(kept_colors, len(futures)+1)
    gained_colors = self.get_colors(gained_colors, len(futures)+1)

    extent = BBox(bounds, projection=WGS84)
    self.service = Service.objects.get(name='{}_p{}_800m_pa'.format(species, historic))
    variable = self.service.variable_set.all().first()
    native_extent = extent.project(Proj(str(variable.projection)))

    # Slice the native grid down to just the requested extent.
    coords = SpatialCoordinateVariables.from_bbox(variable.full_extent, *self.get_grid_spatial_dimensions(variable))
    x_slice = coords.x.indices_for_range(native_extent.xmin, native_extent.xmax)
    y_slice = coords.y.indices_for_range(native_extent.ymin, native_extent.ymax)

    historic_data = self.get_grid_for_variable(variable, x_slice=x_slice, y_slice=y_slice)
    self.close_dataset()

    if not futures:
        data = historic_data
        renderer = UniqueValuesRenderer([(1, Color.from_hex(single_color))], fill_value=0)
    else:
        # Sum per-future presence grids: each cell counts agreeing futures.
        future_grids = []
        for future in futures:
            self.service = Service.objects.get(name='{}_15gcm_{}_pa'.format(species, future))
            variable = self.service.variable_set.all().first()
            future_grids.append(self.get_grid_for_variable(variable, x_slice=x_slice, y_slice=y_slice))
            self.close_dataset()
        future_data = sum(future_grids)
        del future_grids  # free the per-future grids before building outputs

        data = numpy.zeros_like(historic_data, numpy.uint8)
        # 1 = historic presence only; 2..len(futures)+1 = "kept" by N futures;
        # values above that = "gained" by N futures.
        data[historic_data == 1] = 1
        kept_idx = (historic_data == 1) & (future_data > 0)
        data[kept_idx] = future_data[kept_idx] + 1
        gained_idx = (historic_data == 0) & (future_data > 0)
        data[gained_idx] = future_data[gained_idx] + len(kept_colors) + 1
        # NOTE(review): relies on zeros_like(historic_data) producing a masked
        # array (i.e. historic_data is masked) — confirm upstream guarantees.
        data[data.mask == 1] = 0

        values = numpy.unique(data)
        # Only include colormap entries for values actually present in data.
        renderer = UniqueValuesRenderer(
            [
                (i+1, Color.from_hex(c))
                for (i, c) in enumerate(kept_colors) if i+1 in values
            ] + [
                (i+len(kept_colors)+1, Color.from_hex(c))
                for (i, c) in enumerate(gained_colors) if i+len(kept_colors)+1 in values
            ],
            fill_value=0
        )

    image = renderer.render_image(data.data).convert('RGBA')
    # Warp from the native projection back to the requested WGS84 extent.
    return GeoImage(image, native_extent).warp(extent, size).image
def test_get_renderers_by_name():
    """Looking up 'classified' by name yields a working classified renderer."""
    data = numpy.zeros((100, 100))
    data += numpy.arange(100).reshape(-1, 1)  # row i holds the value i

    colors = (
        (10, Color(255, 0, 0, 255)),
        (50, Color(0, 255, 0, 255)),
        (data.max(), Color(0, 0, 255, 255)),
    )
    renderer_cls = get_renderer_by_name("classified")
    renderer = renderer_cls(colors)

    img = renderer.render_image(data)
    assert img.palette.palette == b'\xff\x00\x00\x00\xff\x00\x00\x00\xff\x00\x00\x00'
    assert img.size == (100, 100)
def colormap_to_stretched_renderer(colormap, colorspace='hsv', filenames=None, variable=None, fill_value=None, mask=None):
    """Build a StretchedRenderer from a 'value:hexcolor,...' colormap string.

    Values may be 'min', 'max' or 'mean', which are filled in from statistics
    collected over the given datasets.

    :param colormap: comma-delimited 'value:color' entries
    :param colorspace: interpolation color space passed to the renderer
    :param filenames: datasets for statistics (required when min/max/mean used)
    :param variable: variable for statistics (required when min/max/mean used)
    :param fill_value: optional fill value passed to the renderer
    :param mask: optional mask applied when collecting statistics
    :raises ValueError: when statistics are needed but filenames/variable were
        not both provided
    """
    statistics = None
    # 'mean:' (with colon) for consistency with the other statistic checks —
    # entries always have the form 'value:color'.
    if 'min:' in colormap or 'max:' in colormap or 'mean:' in colormap:
        # BUG FIX: was `if not filenames and variable:` — precedence meant the
        # error only fired when filenames was missing AND variable was present.
        if not (filenames and variable):
            raise ValueError(
                'filenames and variable are required inputs to use colormap with statistics'
            )
        statistics = collect_statistics(filenames, (variable, ), mask=mask)[variable]

    colors = []
    for entry in colormap.split(','):
        value, color = entry.split(':')

        # TODO: add proportions of statistics
        if value in ('min', 'max', 'mean'):
            value = statistics[value]
        else:
            value = float(value)

        colors.append((value, Color.from_hex(color)))

    return StretchedRenderer(colors, colorspace=colorspace, fill_value=fill_value)
def palette_to_stretched_renderer(palette_path, values, filenames=None, variable=None, fill_value=None, mask=None):
    """Build a StretchedRenderer from a palettable palette and break values.

    :param palette_path: palette identifier, resolved via ``get_palette``
    :param values: comma-delimited break values; 'min' / 'max' are replaced
        with statistics collected from the datasets
    :param filenames: datasets for statistics (required when 'min'/'max' used)
    :param variable: variable for statistics (required when 'min'/'max' used)
    :param fill_value: optional fill value passed to the renderer
    :param mask: optional mask applied when collecting statistics
    :raises ValueError: for fewer than 2 values, or when statistics are needed
        but filenames/variable were not both provided
    """
    palette = get_palette(palette_path)

    values = values.split(',')
    if not len(values) > 1:
        raise ValueError(
            'Must provide at least 2 values for palette-based stretched renderer'
        )

    if 'min' in values or 'max' in values:
        # BUG FIX: was `if not filenames and variable:` — operator precedence
        # meant the error only fired when filenames was missing AND variable
        # was present. Both inputs are required to collect statistics.
        if not (filenames and variable):
            raise ValueError(
                'filenames and variable are required inputs to use palette with statistics'
            )

        statistics = collect_statistics(filenames, (variable, ), mask=mask)[variable]
        for statistic in ('min', 'max'):
            if statistic in values:
                values[values.index(statistic)] = statistics[statistic]

    values = [float(v) for v in values]  # in case any are still strings

    hex_colors = palette.hex_colors

    # TODO: this only works cleanly for min:max or 2 endpoint values. Otherwise
    # require that the number of palette colors match the number of values
    colors = [(values[0], Color.from_hex(hex_colors[0]))]
    intermediate_colors = hex_colors[1:-1]
    if intermediate_colors:
        # Spread the interior palette colors evenly between the two endpoints.
        interval = (values[-1] - values[0]) / (len(intermediate_colors) + 1)
        for i, color in enumerate(intermediate_colors):
            colors.append(
                (values[0] + (i + 1) * interval, Color.from_hex(color)))
    colors.append((values[-1], Color.from_hex(hex_colors[-1])))

    return StretchedRenderer(
        colors, colorspace='rgb', fill_value=fill_value
    )  # I think all palettable palettes are in RGB ramps
def _palette_to_stretched_renderer(palette_path, values, filenames=None, variable=None):
    """Build a StretchedRenderer from a dotted palettable path and break values.

    :param palette_path: dotted path below ``palettable``
    :param values: comma-delimited break values; 'min' / 'max' are replaced
        with statistics collected from the datasets
    :param filenames: datasets for statistics (required when 'min'/'max' used)
    :param variable: variable for statistics (required when 'min'/'max' used)
    :raises ValueError: for fewer than 2 values, or when statistics are needed
        but filenames/variable were not both provided
    """
    index = palette_path.rindex('.')
    palette = getattr(
        importlib.import_module('palettable.' + palette_path[:index]),
        palette_path[index + 1:])

    values = values.split(',')
    if not len(values) > 1:
        raise ValueError(
            'Must provide at least 2 values for palette-based stretched renderer'
        )

    if 'min' in values or 'max' in values:
        # BUG FIX: was `if not filenames and variable:` — operator precedence
        # meant the error only fired when filenames was missing AND variable
        # was present. Both inputs are required.
        if not (filenames and variable):
            raise ValueError(
                'filenames and variable are required inputs to use palette with statistics'
            )

        statistics = collect_statistics(filenames, (variable, ))[variable]
        for statistic in ('min', 'max'):
            if statistic in values:
                values[values.index(statistic)] = statistics[statistic]

    # BUG FIX: values split from the input string remain strings unless they
    # were replaced by statistics; convert so the interval arithmetic and the
    # renderer receive numbers (matches the public sibling function).
    values = [float(v) for v in values]

    hex_colors = palette.hex_colors

    # TODO: this only works cleanly for min:max or 2 endpoint values. Otherwise
    # require that the number of palette colors match the number of values
    colors = [(values[0], Color.from_hex(hex_colors[0]))]
    intermediate_colors = hex_colors[1:-1]
    if intermediate_colors:
        interval = (values[-1] - values[0]) / (len(intermediate_colors) + 1)
        for i, color in enumerate(intermediate_colors):
            colors.append(
                (values[0] + (i + 1) * interval, Color.from_hex(color)))
    colors.append((values[-1], Color.from_hex(hex_colors[-1])))

    return StretchedRenderer(
        colors, colorspace='rgb')  # I think all palettable palettes are in RGB ramps
def test_color():
    """Round-trip black through tuple, hex and HSV representations."""
    black = (0, 0, 0)
    c = Color(*black)

    assert c.to_tuple() == black
    assert c.to_hex() == "#000"

    roundtripped = Color.from_hsv(*c.to_hsv())
    assert roundtripped.to_tuple() == black

    # from_hex with an explicit alpha returns a truthy Color.
    assert Color.from_hex("#000000", alpha=100)
def _create_elevation_service(self, zone, band, data, nodata_value, coords):
    """Create (or reuse) an ncdjango Service exposing elevation for one zone band.

    Writes the elevation grid to a NetCDF file under SERVICE_DATA_ROOT and
    registers a Service + Variable for it.

    :param zone: zone object providing zone_uid and name
    :param band: sequence whose first two items are the low/high elevations
    :param data: elevation grid (numpy array) to write
    :param nodata_value: fill value recorded on the NetCDF variable
    :param coords: spatial coordinate variables providing bbox and dataset dims
    :return: the existing or newly created Service
    """
    low, high = band[:2]
    elevation_service_name = "zones/elevation/{}_{}_{}".format(zone.zone_uid, low, high)
    bbox = coords.bbox

    # Reuse an existing service rather than recreating it.
    service = Service.objects.filter(name=elevation_service_name)
    if service.exists():
        return service.first()

    rel_path = elevation_service_name + ".nc"
    abs_path = os.path.join(SERVICE_DATA_ROOT, rel_path)
    if not os.path.exists(os.path.dirname(abs_path)):
        os.makedirs(os.path.dirname(abs_path))

    # Write the grid with lat/lon coordinate variables and CRS metadata.
    with Dataset(abs_path, "w", format="NETCDF4") as ds:
        coords.add_to_dataset(ds, "longitude", "latitude")
        data_var = ds.createVariable(
            "data", data.dtype, dimensions=("latitude", "longitude"), fill_value=nodata_value,
        )
        data_var[:] = data
        set_crs(ds, "data", Proj("epsg:4326"))

    # extract out unmasked data
    masked_data = data[data != nodata_value]
    # Single-color stretch (both endpoints use the same color).
    renderer = StretchedRenderer(
        [(masked_data.min().item(), Color(46, 173, 60),), (masked_data.max().item(), Color(46, 173, 60),),]
    )

    service = Service.objects.create(
        name=elevation_service_name,
        description="Elevation for zone {}, {} - {}".format(zone.name, low, high),
        data_path=rel_path,
        projection="epsg:4326",
        full_extent=bbox,
        initial_extent=bbox,
    )
    Variable.objects.create(
        service=service,
        index=0,
        variable="data",
        projection="epsg:4326",
        x_dimension="longitude",
        y_dimension="latitude",
        name="data",
        renderer=renderer,
        full_extent=bbox,
    )
    return service
def renderer_from_dict(renderer_dict):
    """Returns a renderer object from a dictionary object.

    Expected shape::

        {'type': 'stretched'|'classified'|'unique',
         'colors': [(value, '#hex'), ...],
         'options': {'fill_value': ..., 'color_space': ..., 'labels': [...]}}

    :raises ValueError: for missing keys, an invalid color space, non-iterable
        labels, or an unrecognized renderer type.
    """
    options = renderer_dict.get('options', {})

    try:
        renderer_type = renderer_dict['type']
        renderer_colors = [(float(x[0]), Color.from_hex(x[1])) for x in renderer_dict['colors']]
        fill_value = options.get('fill_value')
        if fill_value is not None:
            fill_value = float(fill_value)
    except KeyError:
        # BUG FIX: message previously read "renderer renderer_dicturation"
        # (botched rename of "configuration").
        raise ValueError("Missing required keys from renderer configuration")

    renderer_kwargs = {
        'colormap': renderer_colors,
        'fill_value': fill_value,
        'background_color': Color(255, 255, 255, 0)
    }

    if renderer_type == "stretched":
        color_space = options.get('color_space', 'hsv').lower().strip()
        if color_space not in ('rgb', 'hsv'):
            raise ValueError("Invalid color space: {}".format(color_space))

        renderer = StretchedRenderer(colorspace=color_space, **renderer_kwargs)
    elif renderer_type == "classified":
        renderer = ClassifiedRenderer(**renderer_kwargs)
    elif renderer_type == "unique":
        try:
            labels = [six.text_type(x) for x in options.get('labels', [])]
        except TypeError:
            raise ValueError("Labels option must be an array")

        renderer = UniqueValuesRenderer(labels=labels, **renderer_kwargs)
    else:
        # BUG FIX: an unrecognized type previously fell through and raised
        # UnboundLocalError on `renderer` at the return below.
        raise ValueError("Invalid renderer type: {}".format(renderer_type))

    return renderer
def colormap_to_stretched_renderer(colormap, colorspace='hsv', filenames=None, variable=None, fill_value=None, mask=None):
    """Build a StretchedRenderer from a 'value:hexcolor,...' colormap string.

    Values may be 'min', 'max' or 'mean', which are filled in from statistics
    collected over the given datasets.

    :param colormap: comma-delimited 'value:color' entries
    :param colorspace: interpolation color space passed to the renderer
    :param filenames: datasets for statistics (required when min/max/mean used)
    :param variable: variable for statistics (required when min/max/mean used)
    :param fill_value: optional fill value passed to the renderer
    :param mask: optional mask applied when collecting statistics
    :raises ValueError: when statistics are needed but filenames/variable were
        not both provided
    """
    statistics = None
    # 'mean:' (with colon) for consistency with the other statistic checks —
    # entries always have the form 'value:color'.
    if 'min:' in colormap or 'max:' in colormap or 'mean:' in colormap:
        # BUG FIX: was `if not filenames and variable:` — precedence meant the
        # error only fired when filenames was missing AND variable was present.
        if not (filenames and variable):
            raise ValueError('filenames and variable are required inputs to use colormap with statistics')
        statistics = collect_statistics(filenames, (variable,), mask=mask)[variable]

    colors = []
    for entry in colormap.split(','):
        value, color = entry.split(':')

        # TODO: add proportions of statistics
        if value in ('min', 'max', 'mean'):
            value = statistics[value]
        else:
            value = float(value)

        colors.append((value, Color.from_hex(color)))

    return StretchedRenderer(colors, colorspace=colorspace, fill_value=fill_value)
def palette_to_classified_renderer(palette_path, filenames, variable, method='equal', fill_value=None, mask=None):
    """Build a ClassifiedRenderer from a palettable palette using equal-interval breaks.

    :param palette_path: palette identifier, resolved via ``get_palette``
    :param filenames: datasets used to collect min/max statistics
    :param variable: variable used to collect statistics
    :param method: classification method; only 'equal' is supported
    :param fill_value: optional fill value passed to the renderer
    :param mask: optional mask applied when collecting statistics
    :raises ValueError: for an unsupported classification method
    """
    palette = get_palette(palette_path)
    num_breaks = palette.number
    colors = [Color(r, g, b) for (r, g, b) in palette.colors]

    if method == 'equal':
        statistics = collect_statistics(filenames, (variable, ), mask=mask)[variable]
        step = (statistics['max'] - statistics['min']) / num_breaks
        # Upper bound of each equal-width class, from min+step through max.
        breaks = numpy.linspace(statistics['min'] + step, statistics['max'], num_breaks)
    else:
        # BUG FIX: an unsupported method previously fell through and raised
        # NameError on `breaks` below.
        raise ValueError("Unsupported classification method: {}".format(method))

    return ClassifiedRenderer(zip(breaks, colors), fill_value=fill_value)
def __init__(self, colormap, fill_value, background_color):
    """
    Construct a new renderer.

    :param colormap: [(value or class break, Color object)...]
    :param fill_value: value to fill with background color (if provided) or transparent
    :param background_color: the background color to apply to all areas not specifically handled by colormap,
        including areas with fill_value or masked out; defaults to fully transparent black when None.
    """
    if background_color is None:
        background_color = Color(0, 0, 0, 0)
    else:
        assert isinstance(background_color, Color)

    # Keep the colormap ordered by break value so lookups can rely on order.
    self.colormap = sorted(colormap, key=lambda entry: entry[0])
    self.fill_value = fill_value
    self.background_color = background_color

    self.values = numpy.array([break_value for break_value, _ in self.colormap])
    self._generate_palette()
# Site title and PDF template, overridable via Django settings.
SEEDSOURCE_TITLE = getattr(settings, 'SEEDSOURCE_TITLE', 'Seedlot Selection Tool')
PDF_TEMPLATE = getattr(settings, 'REPORT_PDF_TEMPLATE', 'pdf/report.html')

TILE_SIZE = (256, 256)
IMAGE_SIZE = (645, 430)

# Maps internal climate period keys to the year-range labels shown in reports.
YEAR_LABELS = {
    '1961_1990': '1961-1990',
    '1981_2010': '1981-2010',
    '2025': '2011-2040',
    '2055': '2041-2070',
    '2085': '2071-2100'
}

# Red -> orange -> pale yellow ramp used to render result scores (0-100).
RESULTS_RENDERER = StretchedRenderer([
    (0, Color(240, 59, 32)),
    (50, Color(254, 178, 76)),
    (100, Color(255, 237, 160))
])


class Report(object):
    """Holds the map/report parameters captured from the client."""

    def __init__(self, configuration, zoom, center, tile_layers, opacity):
        # configuration: report configuration payload (structure defined by caller)
        self.configuration = configuration
        self.zoom = zoom
        self.center = center
        self.tile_layers = tile_layers
        self.opacity = opacity

    def get_year(self, climate):
        """Return the display label for the climate period key in `climate['time']`."""
        return YEAR_LABELS[climate['time']]
import json

from PIL import Image
import numpy

from trefoil.utilities.color import Color

# Border styling applied to legend elements.
LEGEND_ELEMENT_BORDER_COLOR = Color(150, 150, 150, 0)
LEGEND_ELEMENT_BORDER_WIDTH = 1


class RasterRenderer(object):
    def __init__(self, colormap, fill_value, background_color):
        """
        Construct a new renderer.

        :param colormap: [(value or class break, Color object)...]
        :param fill_value: value to fill with background color (if provided) or transparent
        :param background_color: the background color to apply to all areas not specifically handled by colormap,
            including areas with fill_value or masked out.
        """
        if background_color is not None:
            assert isinstance(background_color, Color)
        else:
            # Default to fully transparent black.
            background_color = Color(0, 0, 0, 0)

        self.colormap = list(colormap)
        self.fill_value = fill_value
        self.background_color = background_color

        # Keep colormap sorted by break value; `values` mirrors the sorted breaks.
        self.colormap.sort(key=lambda x: x[0])
        self.values = numpy.array([entry[0] for entry in self.colormap])
def handle(self, datasets, directory, overwrite, *args, **options):
    """Register each NetCDF dataset as an ncdjango Service with one Variable per spatial grid.

    :param datasets: paths of NetCDF files to publish
    :param directory: optional subdirectory (relative) to namespace services and data files
    :param overwrite: when True, replace existing services (old data files are
        deleted after all services are created); otherwise an existing name is an error
    :raises CommandError: when a service already exists (without overwrite) or
        a dataset contains no spatial variables
    """
    old_files = []

    for dataset in datasets:
        filename = os.path.basename(dataset)
        name = os.path.splitext(filename)[0]
        if directory is not None:
            filename = '{}/{}'.format(directory.strip('/'), filename)
            name = '{}/{}'.format(directory.strip('/'), name)

        with transaction.atomic():
            existing = Service.objects.filter(name__iexact=name)
            if existing.exists():
                if overwrite:
                    # Defer file deletion until all services are created.
                    old_files.append(
                        os.path.join(SERVICE_DIR, existing.get().data_path))
                    existing.delete()
                else:
                    raise CommandError(
                        "A service named '{}' already exists".format(name))

            with Dataset(dataset, 'r') as ds:
                variables = []
                x_dimension = None
                y_dimension = None
                projection = None

                desc = describe(ds)

                # Collect all variables that carry a spatial grid.
                # NOTE(review): the grid dims/projection retained below are
                # those of the LAST spatial variable — assumes all variables
                # share one grid; confirm for multi-grid datasets.
                for variable, variable_info in desc['variables'].items():
                    if 'spatial_grid' in variable_info:
                        variables.append(variable)
                        spatial_grid = variable_info['spatial_grid']
                        x_dimension = spatial_grid['x_dimension']
                        y_dimension = spatial_grid['y_dimension']
                        projection = Proj(variable_info['proj4'])

                if not variables:
                    raise CommandError('No usable variables found')

                coords = SpatialCoordinateVariables.from_dataset(
                    ds, x_dimension, y_dimension, projection=projection)

                service = Service.objects.create(name=name,
                                                 data_path=filename,
                                                 projection=coords.projection,
                                                 full_extent=coords.bbox,
                                                 initial_extent=coords.bbox)
                for variable in variables:
                    # BUG FIX: previously used the loop-leftover `variable_info`,
                    # so every variable got the LAST variable's min/max in its
                    # default renderer. Look up this variable's own info.
                    variable_info = desc['variables'][variable]
                    Variable.objects.create(service=service,
                                            index=0,
                                            variable=variable,
                                            projection=projection,
                                            x_dimension=x_dimension,
                                            y_dimension=y_dimension,
                                            name=variable,
                                            renderer=StretchedRenderer([
                                                (variable_info['min'], Color(0, 0, 0)),
                                                (variable_info['max'], Color(255, 255, 255))
                                            ]),
                                            full_extent=coords.bbox)
                print('Added {}...'.format(name))

    # Remove data files belonging to overwritten services.
    for path in old_files:
        if os.path.exists(path):
            os.remove(path)

    # Copy all source datasets into the service data directory.
    for dataset in datasets:
        target_dir = Path(SERVICE_DIR) / (directory or '')
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        shutil.copy(dataset, target_dir)
    }
}

# Serve webpack bundles from the root path outside of development.
if not DEBUG:
    WEBPACK_LOADER['DEFAULT']['BUNDLE_DIR_NAME'] = '/'

# ncdjango geoprocessing job registry. generate_scores publishes its raster
# output as a service rendered with a red -> orange -> pale yellow ramp
# (high scores red).
NC_REGISTERED_JOBS = {
    'generate_scores': {
        'type': 'task',
        'task': 'seedsource_core.django.seedsource.tasks.generate_scores.GenerateScores',
        'publish_raster_results': True,
        'results_renderer': StretchedRenderer([(100, Color(240, 59, 32)),
                                               (50, Color(254, 178, 76)),
                                               (0, Color(255, 237, 160))])
    },
    'write_tif': {
        'type': 'task',
        'task': 'seedsource_core.django.seedsource.tasks.write_tif.WriteTIF',
    },
}

# ncdjango interfaces enabled for this deployment.
NC_INSTALLED_INTERFACES = ('ncdjango.interfaces.data',
                           'ncdjango.interfaces.arcgis_extended',
                           'ncdjango.interfaces.arcgis',
                           'seedsource_core.interfaces.tiles')

NC_ENABLE_STRIDING = True
        # Webpack dev-server polling configuration; ignore hot-update and map files.
        'POLL_INTERVAL': 0.1,
        'TIMEOUT': None,
        'IGNORE': ['.+\.hot-update.js', '.+\.map']
    }
}

# Serve webpack bundles from the root path outside of development.
if not DEBUG:
    WEBPACK_LOADER['DEFAULT']['BUNDLE_DIR_NAME'] = '/'

# ncdjango geoprocessing job registry. generate_scores publishes its raster
# output as a service rendered with a red -> orange -> pale yellow ramp
# (high scores red).
NC_REGISTERED_JOBS = {
    'generate_scores': {
        'type': 'task',
        'task': 'seedsource_core.django.seedsource.tasks.generate_scores.GenerateScores',
        'publish_raster_results': True,
        'results_renderer': StretchedRenderer([
            (100, Color(240, 59, 32)),
            (50, Color(254, 178, 76)),
            (0, Color(255, 237, 160))
        ])
    },
    'write_tif': {
        'type': 'task',
        'task': 'seedsource_core.django.seedsource.tasks.write_tif.WriteTIF',
    },
}

# ncdjango interfaces enabled for this deployment (tuple continues below).
NC_INSTALLED_INTERFACES = (
    'ncdjango.interfaces.data',
    'ncdjango.interfaces.arcgis_extended',
    'ncdjango.interfaces.arcgis',
import pyproj
from PIL import Image
from django.views.generic.base import View
from trefoil.geometry.bbox import BBox
from trefoil.render.renderers.unique import UniqueValuesRenderer
from trefoil.utilities.color import Color
from django.http import HttpResponse
from functools import reduce
from ncdjango.geoimage import GeoImage
from ncdjango.models import Service
from ncdjango.views import GetImageViewBase
from pyproj import Proj

TILE_SIZE = (256, 256)

# Single-value renderer: presence (1) drawn in light blue, 0 treated as fill.
RENDERER = UniqueValuesRenderer([(1, Color(158, 202, 225))], fill_value=0)

WGS84 = Proj('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
WEB_MERCATOR = Proj(
    '+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +a=6378137 +b=6378137 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
)


class IntersectView(GetImageViewBase):
    def dispatch(self, request, *args, **kwargs):
        # Reset per-request service; skip GetImageViewBase.dispatch on purpose
        # and go straight to the generic View dispatch.
        self.service = None
        return View.dispatch(self, request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        # Tile bounds in lat/lon for the requested XYZ tile.
        # NOTE(review): `mercantile` is not imported in this chunk — presumably
        # imported elsewhere in the module; verify.
        tile_bounds = list(
            mercantile.bounds(int(self.kwargs['x']), int(self.kwargs['y']),
                              int(self.kwargs['z'])))
from trefoil.render.renderers.stretched import StretchedRenderer
from trefoil.render.renderers.unique import UniqueValuesRenderer
from trefoil.utilities.color import Color

from . import utils

# Defaults applied when an image request omits the corresponding option.
DEFAULT_IMAGE_SIZE = (400, 400)
DEFAULT_IMAGE_FORMAT = "png"
DEFAULT_BACKGROUND_COLOR = Color(0, 0, 0, 0)


class ImageConfiguration(object):
    """Properties for the image request"""

    def __init__(self, **kwargs):
        # extent: optional spatial extent for the image (None when not given)
        self.extent = kwargs.get('extent')
        self.size = kwargs.get('size', DEFAULT_IMAGE_SIZE)
        self.image_format = kwargs.get('image_format', DEFAULT_IMAGE_FORMAT)
        self.background_color = kwargs.get('background_color', DEFAULT_BACKGROUND_COLOR)


class ConfigurationBase(object):
    """Base request configuration class"""

    def __init__(self, variable, time_index=None):
        self.variable = variable
        self.time_index = time_index

    def set_time_index_from_datetime(self, value, best_fit=True):
        """
        Sets the time_index parameter from a datetime using start/end/interval/units information from the service
        configuration. If best_fit is True, the method will match the closest time index for the given value,
        otherwise it will raise a ValueError for any value which doesn't exactly match a time index.
def handle(self, data_files, overwrite, *args, **options):
    """Publish each NetCDF file as an ncdjango Service with one Variable per spatial grid.

    Copies the data file into SERVICE_DATA_ROOT and registers the service
    inside a single transaction; existing files/services abort the command
    unless overwrite is given.

    :param data_files: paths of NetCDF files to publish
    :param overwrite: replace an existing file/service instead of failing
    :raises CommandError: if SERVICE_DATA_ROOT is missing or a conflict exists
        without overwrite
    """
    with transaction.atomic():
        for data_file in data_files:
            if not os.path.isdir(SERVICE_DATA_ROOT):
                raise CommandError('Directory %s does not exist.' % SERVICE_DATA_ROOT)

            # Check for existence of file or service. There's a possible race
            # condition here, but this is a management command, not a user command.
            file_exists = False
            svc_exists = False

            target = os.path.join(SERVICE_DATA_ROOT, os.path.basename(data_file))
            if os.path.exists(target):
                if not overwrite:
                    self.stderr.write('File %s already exists.\n' % target)
                    file_exists = True

            # Service name is the data file's base name without extension.
            svc_name = os.path.basename(data_file).split('.')[0]
            if Service.objects.filter(name=svc_name).exists():
                if overwrite:
                    Service.objects.filter(name=svc_name).delete()
                else:
                    self.stderr.write('Service %s already exists.\n' % svc_name)
                    svc_exists = True

            if (file_exists or svc_exists) and not overwrite:
                raise CommandError('No changes made.')

            desc = describe(data_file)
            # Service-level extent comes from the first spatial variable found.
            grid = next(
                v['spatial_grid'] for k, v in desc['variables'].items()
                if v.get('spatial_grid'))
            extent = grid['extent']
            proj = extent['proj4']
            bbox = BBox(
                [extent[c] for c in ['xmin', 'ymin', 'xmax', 'ymax']],
                pyproj.Proj(proj)
            )
            # All variables share one black presence renderer (value 1).
            renderer = UniqueValuesRenderer([(1, Color(0, 0, 0, 255))], fill_value=0)

            if file_exists:
                os.remove(os.path.join(SERVICE_DATA_ROOT, os.path.basename(data_file)))
            shutil.copy(data_file, SERVICE_DATA_ROOT)

            service = Service.objects.create(
                name=svc_name,
                projection=proj,
                full_extent=bbox,
                initial_extent=bbox,
                data_path=os.path.basename(data_file)
            )
            for i, (variable_name, variable) in enumerate(desc['variables'].items()):
                grid = variable.get('spatial_grid')
                if grid is None:
                    # Skip variables without a spatial grid.
                    continue
                # Per-variable extent may differ from the service-level one.
                extent = grid['extent']
                bbox = BBox(
                    [extent[c] for c in ['xmin', 'ymin', 'xmax', 'ymax']],
                    pyproj.Proj(extent['proj4'])
                )
                Variable.objects.create(
                    service=service,
                    index=i,
                    variable=variable_name,
                    projection=proj,
                    x_dimension=grid['x_dimension'],
                    y_dimension=grid['y_dimension'],
                    name=variable_name,
                    renderer=renderer,
                    full_extent=bbox
                )
def _parse_colormap(colormap_str):
    """Parse a 'value:hexcolor,...' string into [(float, Color), ...]."""
    pairs = (entry.split(':') for entry in colormap_str.split(','))
    return [(float(value), Color.from_hex(color)) for value, color in pairs]
def handle(self, region_name, *args, **options):
    """Create the DEM and ClimateNA services for one region.

    Reads the region's DEM NetCDF to determine the lat/lon extent, then
    creates (skipping any that already exist) a `<region>_dem` service and one
    `<region>_<period>Y_<var>` service per PERIODS x VARS combination, each
    with a black-to-white stretched renderer spanning the variable's range.

    :param region_name: one-element sequence holding the region name
    """
    name = region_name[0]

    from django.conf import settings
    BASE_DIR = settings.NC_SERVICE_DATA_ROOT

    # determine extent and lat/lon variable names from DEM
    dem_path = os.path.join(BASE_DIR, "regions", name, "{}_dem.nc".format(name))
    with Dataset(dem_path, "r") as ds:
        dims = ds.dimensions.keys()
        # Dimension names vary between datasets ("lat" vs "latitude").
        lat = "lat" if "lat" in dims else "latitude"
        lon = "lon" if "lon" in dims else "longitude"

        l = float(ds.variables[lon][:].min())
        b = float(ds.variables[lat][:].min())
        r = float(ds.variables[lon][:].max())
        t = float(ds.variables[lat][:].max())
    extent = BBox((l, b, r, t), projection=pyproj.Proj(WGS84))

    # Generate DEM service
    with transaction.atomic():
        print("Adding {}".format(name))
        print("---")
        print("elevation")

        service_name = "{}_dem".format(name)
        if Service.objects.filter(name__iexact=service_name).exists():
            print("{} already exists, skipping.".format(service_name))
        else:
            dem_service = Service.objects.create(
                name=service_name,
                data_path="regions/{name}/{name}_dem.nc".format(name=name),
                projection=WGS84,
                full_extent=extent,
                initial_extent=extent,
            )
            # Default renderer stretches black -> white over the data range.
            with Dataset(dem_path, "r") as ds:
                v_min = numpy.nanmin(ds.variables["elevation"][:]).item()
                v_max = numpy.nanmax(ds.variables["elevation"][:]).item()
            renderer = StretchedRenderer([(v_min, Color(0, 0, 0)), (v_max, Color(255, 255, 255))])
            Variable.objects.create(
                service=dem_service,
                index=0,
                variable="elevation",
                projection=WGS84,
                x_dimension=lon,
                y_dimension=lat,
                name="elevation",
                renderer=renderer,
                full_extent=extent,
            )

    # Generate ClimateNA services
    with transaction.atomic():
        for year in PERIODS:
            print("")
            print(year)
            print("---")

            for var in VARS:
                print(var)

                service_name = "{}_{}Y_{}".format(name, year, var)
                if not Service.objects.filter(name__iexact=service_name).exists():
                    data_path = "regions/{name}/{year}Y/{name}_{year}Y_{var}.nc".format(
                        name=name, year=year, var=var
                    )
                    # Some period/variable combinations may be absent on disk.
                    if not os.path.exists(os.path.join(BASE_DIR, data_path)):
                        print("{} does not exist, skipping.".format(service_name))
                        continue

                    service = Service.objects.create(
                        name=service_name,
                        data_path=data_path,
                        projection=WGS84,
                        full_extent=extent,
                        initial_extent=extent,
                    )
                    with Dataset(os.path.join(BASE_DIR, service.data_path), "r") as ds:
                        # Dimension names may differ per file; re-detect here.
                        dims = ds.dimensions.keys()
                        lat = "lat" if "lat" in dims else "latitude"
                        lon = "lon" if "lon" in dims else "longitude"
                        v_min = numpy.nanmin(ds.variables[var][:]).item()
                        v_max = numpy.nanmax(ds.variables[var][:]).item()
                    renderer = StretchedRenderer([(v_min, Color(0, 0, 0)), (v_max, Color(255, 255, 255))])
                    variable = Variable.objects.create(
                        service=service,
                        index=0,
                        variable=var,
                        projection=WGS84,
                        x_dimension=lon,
                        y_dimension=lat,
                        name=var,
                        renderer=renderer,
                        full_extent=extent,
                    )
                else:
                    print("{} already exists, skipping.".format(service_name))
def process_web_outputs(results, job, publish_raster_results=False, renderer_or_fn=None):
    """Convert geoprocessing job outputs into web-friendly values.

    Raster outputs are optionally written to NetCDF and published as ncdjango
    services (the output value becomes the service name); ndarray outputs are
    converted to lists (or their string form when large).

    :param results: task results object providing format_args()
    :param job: the processing job; its uuid namespaces the published services
    :param publish_raster_results: publish raster outputs as services when True
    :param renderer_or_fn: a renderer, a callable producing one from the
        raster, or None for a default black-to-white stretch
    :return: the outputs dict with rasters/arrays replaced by serializable values
    """
    outputs = results.format_args()

    for k, v in iter(outputs.items()):
        if is_raster(v) and publish_raster_results:
            service_name = '{0}/{1}'.format(job.uuid, k)
            rel_path = '{}.nc'.format(service_name)
            abs_path = os.path.join(SERVICE_DATA_ROOT, rel_path)
            os.makedirs(os.path.dirname(abs_path))

            with Dataset(abs_path, 'w', format='NETCDF4') as ds:
                # Name the dimensions by projection type.
                if is_latlong(v.extent.projection):
                    x_var = 'longitude'
                    y_var = 'latitude'
                else:
                    x_var = 'x'
                    y_var = 'y'

                # shape is (rows, cols); from_bbox wants (x, y) sizes.
                coord_vars = SpatialCoordinateVariables.from_bbox(
                    v.extent, *reversed(v.shape))
                coord_vars.add_to_dataset(ds, x_var, y_var)

                fill_value = v.fill_value if numpy.ma.core.is_masked(v) else None
                data_var = ds.createVariable('data', v.dtype,
                                             dimensions=(y_var, x_var),
                                             fill_value=fill_value)
                data_var[:] = v
                set_crs(ds, 'data', v.extent.projection)

            # Renderer may be provided directly, built by a callback, or
            # default to a black -> white stretch over the data range.
            if callable(renderer_or_fn):
                renderer = renderer_or_fn(v)
            elif renderer_or_fn is None:
                renderer = StretchedRenderer([
                    (numpy.min(v).item(), Color(0, 0, 0)),
                    (numpy.max(v).item(), Color(255, 255, 255))
                ])
            else:
                renderer = renderer_or_fn

            with transaction.atomic():
                service = Service.objects.create(
                    name=service_name,
                    description=
                    ('This service has been automatically generated from the result of a geoprocessing job.'
                     ),
                    data_path=rel_path,
                    projection=v.extent.projection.srs,
                    full_extent=v.extent,
                    initial_extent=v.extent,
                )
                Variable.objects.create(service=service,
                                        index=0,
                                        variable='data',
                                        projection=v.extent.projection.srs,
                                        x_dimension=x_var,
                                        y_dimension=y_var,
                                        name='data',
                                        renderer=renderer,
                                        full_extent=v.extent)
                ProcessingResultService.objects.create(job=job, service=service)

            outputs[k] = service_name

        elif is_ndarray(v):
            # Small arrays become JSON-friendly lists; large ones a summary string.
            if v.size < numpy.get_printoptions()['threshold']:
                outputs[k] = v.tolist()
            else:
                outputs[k] = str(v)

    return outputs
from trefoil.render.renderers.stretched import StretchedRenderer
from trefoil.utilities.color import Color

from ncdjango.config import RenderConfiguration, IdentifyConfiguration, LegendConfiguration, ImageConfiguration
from ncdjango.exceptions import ConfigurationError
from ncdjango.models import Service, Variable
from ncdjango.utils import proj4_to_epsg, date_to_timestamp
from ncdjango.views import GetImageViewBase, IdentifyViewBase, LegendViewBase, FORCE_WEBP

from .forms import GetImageForm, IdentifyForm
from .utils import extent_to_envelope

# Allow snapping a requested datetime to the closest time index (settings override).
ALLOW_BEST_FIT_TIME_INDEX = getattr(settings, 'NC_ALLOW_BEST_FIT_TIME_INDEX', True)

TRANSPARENT_BACKGROUND_COLOR = Color(255, 255, 255, 0)
DEFAULT_BACKGROUND_COLOR = Color(255, 255, 255)

SUPPORTED_IMAGE_FORMATS = ('PNG', 'PNG8', 'PNG24', 'PNG32', 'JPEG', 'GIF', 'BMP')

# Maps internal time-unit names to their ArcGIS REST API equivalents
# (mapping continues past this chunk).
TIME_UNITS_MAP = {
    'milliseconds': 'esriTimeUnitsMilliseconds',
    'seconds': 'esriTimeUnitsSeconds',
    'minutes': 'esriTimeUnitsMinutes',
    'hours': 'esriTimeUnitsHours',
    'days': 'esriTimeUnitsDays',
    'weeks': 'esriTimeUnitsWeeks',
    'months': 'esriTimeUnitsMonths',
    'years': 'esriTimeUnitsYears',
    'decades': 'esriTimeUnitsDecades',
    'centuries': 'esriTimeUnitsCenturies'
def _parse_colormap(colormap_str):
    """Convert a comma-delimited 'value:color' spec into a colormap list."""
    return [
        (float(raw_value), Color.from_hex(hex_color))
        for raw_value, hex_color in (item.split(':') for item in colormap_str.split(','))
    ]