def timeseries(self):
    """Build the groundwater-level chart payload for the active page.

    Reads the timeseries uuid stored in the session for the active page,
    fetches its events (unless the uuid is the "EMPTY" sentinel), stores
    the nvd3-style payload on ``self.data``, records the number of events
    in the session, and returns the payload.
    """
    series = GroundwaterTimeSeries(use_header=self.logged_in)
    active_page = self.request.GET.get('active', 'startpage')
    stored_uuid = self.request.session[active_page]['uuid']
    points = []
    if stored_uuid != "EMPTY":
        series.uuid(ts_uuid=self.request.session[active_page]['uuid'],
                    organisation=self.selected_organisation_id,
                    **self.time_window)
        if series.results:
            # Chart expects {'x': timestamp, 'y': value} points.
            points = [
                {'y': event['max'], 'x': event['timestamp']}
                for event in series.results[0]['events']
            ]
    self.data = {
        'values': points,
        'key': 'Groundwaterlevels (m)',
        'color': '#1abc9c'
    }
    self.request.session[active_page]['timeseries_length'] = len(points)
    self.request.session.modified = True
    return self.data
def get(self, request, *args, **kwargs):
    """Return all timeseries of the selected organisation as a CSV file.

    The response contains a location header section, a blank separator
    row, and the event rows, served as a CSV attachment.
    """
    logger.debug('Downloading csv for %s', self.selected_organisation)
    series = GroundwaterTimeSeries(use_header=self.logged_in)
    header, events = series.all_to_csv(
        organisation=self.selected_organisation_id)
    # Slugified organisation name, capped so the filename stays sane.
    filename = slugify(self.selected_organisation)[:80] + \
        "_ggmn_timeseries.csv"
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="' + \
        filename + '"'
    writer = csv.writer(response)
    writer.writerow(['uuid', 'name', 'location_name', 'x', 'y'])
    writer.writerows(header)
    writer.writerow([])
    writer.writerow(['name', 'uuid', 'timestamp', 'value'])
    writer.writerows(events)
    logger.debug('Wrote all data to csv-response, '
                 'filename of output csv is: %s', filename)
    return response
def timeseries(self):
    """Collect groundwater-level events for the active session page.

    Stores the chart payload on ``self.data``, writes the event count back
    to the session, and returns the payload. The session uuid value
    "EMPTY" is a sentinel meaning no timeseries is selected.
    """
    client = GroundwaterTimeSeries(use_header=self.logged_in)
    page = self.request.GET.get('active', 'startpage')
    selected_uuid = self.request.session[page]['uuid']
    values = []
    if selected_uuid != "EMPTY":
        client.uuid(ts_uuid=self.request.session[page]['uuid'],
                    organisation=self.selected_organisation_id,
                    **self.time_window)
        if client.results:
            for event in client.results[0]['events']:
                values.append({'y': event['max'],
                               'x': event['timestamp']})
    self.data = {
        'values': values,
        'key': 'Groundwaterlevels (m)',
        'color': '#1abc9c'
    }
    self.request.session[page]['timeseries_length'] = len(values)
    self.request.session.modified = True
    return self.data
def timeseries_(self):
    """Fetch timeseries within the current map bounding box as a dict.

    The groundwater type and statistic are parsed from the map page's
    first dropdown value (defaulting to 'GWmBGS | mean'); the date range
    comes from the map page's datepicker.
    """
    selection = self.request.session['map_'].get(
        'dropdown_0', {'value': 'GWmBGS | mean'})['value']
    gw_type, statistic = [part.strip(' ')
                          for part in selection.split('|')]
    client = GroundwaterTimeSeries(use_header=self.logged_in)
    client.queries = {"name": gw_type}
    south_west, north_east = self.coordinates
    client.bbox(south_west=south_west,
                north_east=north_east,
                organisation=self.selected_organisation_id,
                statistic=statistic,
                **self.time_window)
    datepicker = self.request.session['map_']['datepicker']
    return client.ts_to_dict(
        start_date=jsdt.datestring_to_js(datepicker['start']),
        end_date=jsdt.datestring_to_js(datepicker['end']),
        date_time='str')
def timeseries_(self):
    """Return bounding-box timeseries as a dict keyed by date strings.

    Parses 'type | statistic' from the map dropdown (default
    'GWmBGS | mean') and restricts results to the datepicker range
    stored in the session.
    """
    map_session = self.request.session['map_']
    raw_value = map_session.get(
        'dropdown_0', {'value': 'GWmBGS | mean'})['value']
    parts = raw_value.split('|')
    gw_type = parts[0].strip(' ')
    statistic = parts[1].strip(' ')
    series = GroundwaterTimeSeries(use_header=self.logged_in)
    series.queries = {"name": gw_type}
    south_west, north_east = self.coordinates
    series.bbox(south_west=south_west,
                north_east=north_east,
                organisation=self.selected_organisation_id,
                statistic=statistic,
                **self.time_window)
    start = jsdt.datestring_to_js(map_session['datepicker']['start'])
    end = jsdt.datestring_to_js(map_session['datepicker']['end'])
    return series.ts_to_dict(start_date=start,
                             end_date=end,
                             date_time='str')
def timeseries(self):
    """Return all timeseries attached to the location ``self.uuid``."""
    fetcher = GroundwaterTimeSeries(use_header=self.logged_in)
    fetcher.location_uuid(
        organisation=self.selected_organisation_id,
        loc_uuid=self.uuid)
    return fetcher.results
def get(self, request, *args, **kwargs):
    """Start, poll or finish an asynchronous CSV download task.

    First call (nothing downloading yet): start the csv task on the
    server, record it in the session, and redirect back to the referring
    page. Later calls: try to fetch the task result; if it is not ready
    yet (LizardApiError), redirect back with a "not ready" message, or
    render the placeholder view when no referer is available. Once the
    result is ready, stream it as a CSV attachment and clean the download
    bookkeeping out of the session so a new download can be started.
    """
    # We need the referer to reload the page with the task url set.
    referer = request.META.get('HTTP_REFERER')
    if not self.downloading:
        # Start the task.
        logger.debug('Downloading csv for %s', self.selected_organisation)
        ts = GroundwaterTimeSeries(use_header=self.logged_in)
        task_url, extra_queries = ts.start_csv_task(
            organisation=self.selected_organisation_id)
        download_organisations = self.request.session.get(
            'download', {}).get('organisations', {})
        self.set_session_value('download', 'error_message',
                               'Starting download.')
        download_organisations.update({
            self.selected_organisation_id: {
                'url': task_url,
                'extra_queries': extra_queries,
                'downloading': True
            }
        })
        # BUG FIX: was a stray debug print(); log through the module
        # logger instead of writing to stdout.
        logger.debug('Download state per organisation: %s',
                     download_organisations)
        self.request.session['download'][
            'organisations'] = download_organisations
        self.request.session.modified = True
        # Task is started and stored; return to the current
        # (referred) view.
        return redirect(referer)
    # Download was started earlier.
    try:
        # Try to download the result.
        extra_queries = self.request.session['download']['organisations'][
            self.selected_organisation_id]['extra_queries']
        header, csv_ = self.task.timeseries_csv(
            organisation=self.selected_organisation_id,
            extra_queries_ts=extra_queries)
    except LizardApiError:
        # The download failed, which means it is not yet ready.
        if referer:
            # BUG FIX: the original call had no %s placeholder, so the
            # referer argument was silently dropped from the log line.
            logger.debug('download failed, referer: %s', referer)
            self.set_session_value('download', 'error_message',
                                   'Your download is not ready.')
            return redirect(referer)
        # When called without a referer and the task is not yet
        # finished, return the placeholder view that states the
        # download is not yet finished.
        return super().get(request, *args, **kwargs)
    # The task is finished: create a csv response from the
    # timeseries csv.
    response = HttpResponse(content_type='text/csv')
    filename = slugify(self.selected_organisation)[:80] + \
        "_ggmn_timeseries.csv"
    response['Content-Disposition'] = 'attachment; filename="' + \
        filename + '"'
    writer = csv.writer(response)
    writer.writerow(['uuid', 'name', 'location_name', 'x', 'y'])
    for row in header:
        writer.writerow(row)
    writer.writerow([])
    writer.writerow(['name', 'uuid', 'timestamp', 'value'])
    for row in csv_:
        writer.writerow(row)
    logger.debug(
        'Wrote all data to csv-response, '
        'filename of output csv is: %s', filename)
    # Clean up the session, so the download can be started again later.
    self.set_session_value('download', 'downloading', False)
    download_organisations = self.request.session.get('download', {}).get(
        'organisations', {})
    del download_organisations[self.selected_organisation_id]
    self.set_session_value('download', 'organisations',
                           download_organisations)
    return response