def rebuild_unblobbed_cache(username=None, db_name=None, taskname=None):
    """
    Populate fews unblobbed cache for better user experience.
    """
    handler = get_handler(username=username, taskname=taskname)
    logger = logging.getLogger(__name__)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info('Processing filter tree...')
    fews_filters(ignore_cache=True)
    logger.info('Processing Timeserie.has_data_dict...')
    Timeserie.has_data_dict(ignore_cache=True)
    logger.info('Processing filters...')
    for f in Filter.objects.all():
        f.parameters()
    logger.info('Finished successfully.')
    return 'OK'
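
# Hedged usage sketch, not part of the original module: shows how
# ``rebuild_unblobbed_cache`` would typically be invoked from a
# periodic job (cron or celery beat). The username and taskname values
# are illustrative assumptions; ``db_name`` is accepted for the task
# calling convention but unused by the task itself.
def _rebuild_unblobbed_cache_example():  # pragma: no cover
    result = rebuild_unblobbed_cache(
        username='admin', taskname='rebuild unblobbed cache')
    assert result == 'OK'
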
def _timeseries(self):
    """
    Get list of dicts of all timeseries. Optimized for performance.
    """
    cache_key = 'lizard_fewsunblobbed.layers.timeseries_%s_%s' % (
        self.filterkey, self.parameterkey)
    result = cache.get(cache_key)
    if result is None:
        # Fetching locationkey and parameterkey per timeserie is very
        # expensive, so fetch the parameter once up front...
        parameter = Parameter.objects.get(pk=self.parameterkey)
        # ... and preload all used locations into a dictionary. In
        # some cases this alone takes around 3 seconds.
        locations = dict([(location.pk, location)
                          for location in Location.objects.filter(
                              timeserie__filterkey=self.filterkey)])
        result = []
        related_timeseries = list(Timeserie.objects.filter(
            filterkey=self.filterkey, parameterkey=self.parameterkey))
        # Fetch the cached has_data dict for all timeseries.
        timeseries_has_data = Timeserie.has_data_dict()
        for timeserie in related_timeseries:
            location = locations[timeserie.locationkey_id]
            name = u'%s (%s): %s' % (
                parameter.name, parameter.unit, location.name)
            shortname = u'%s' % location.name
            result.append(
                {'rd_x': location.x,
                 'rd_y': location.y,
                 'longitude': location.longitude,
                 'latitude': location.latitude,
                 'object': timeserie,
                 'location_name': location.name.encode('ascii', 'replace'),
                 # ^^^ This used to be ``str(location.name)``, which
                 # fails on non-ascii input.
                 # TODO: does it really need to be a string? It seems
                 # to be proper unicode to begin with.
                 'name': name,
                 'shortname': shortname,
                 'workspace_item': self.workspace_item,
                 'identifier': {'locationkey': location.pk},
                 'google_coords': location.google_coords(),
                 'has_data': timeserie.pk in timeseries_has_data,
                 })
        # Cache the rows for eight hours.
        cache.set(cache_key, result, 8 * 60 * 60)
    else:
        # The cached rows may belong to a different workspace item, so
        # overwrite workspace_item with our own.
        for row in result:
            row['workspace_item'] = self.workspace_item
    return copy.deepcopy(result)
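
# Hedged sketch, not part of the original module: the compute-once,
# patch-on-hit idiom used by ``_timeseries`` above, reduced to its
# essentials with Django's low-level cache API (``cache`` and ``copy``
# are the module's existing imports). ``build_rows`` is a hypothetical
# stand-in for the expensive ORM work; the key format and the eight
# hour timeout mirror the method above.
def _timeseries_cache_sketch(filterkey, parameterkey, workspace_item,
                             build_rows):  # pragma: no cover
    cache_key = 'example.timeseries_%s_%s' % (filterkey, parameterkey)
    rows = cache.get(cache_key)
    if rows is None:
        rows = build_rows(filterkey, parameterkey)
        cache.set(cache_key, rows, 8 * 60 * 60)
    # The cached payload is shared between workspace items, so re-stamp
    # each row with the caller's workspace item.
    for row in rows:
        row['workspace_item'] = workspace_item
    # Deep copy so callers can mutate rows without corrupting the
    # shared cached payload.
    return copy.deepcopy(rows)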