def test_xldate_from_date_tuple(self):
    """Check conversion of (year, month, day) tuples to Excel serial dates."""
    cases = [
        ((1907, 7, 3), 2741.),
        ((2005, 2, 23), 38406.),
        ((1988, 5, 3), 32266.),
    ]
    for date_tuple, expected in cases:
        date = xldate.xldate_from_date_tuple(date_tuple, DATEMODE)
        self.assertAlmostEqual(date, expected)
def test_xldate_from_date_tuple(self):
    """Verify xldate_from_date_tuple for a few known calendar dates."""
    self.assertAlmostEqual(
        xldate.xldate_from_date_tuple((1907, 7, 3), DATEMODE), 2741.)
    self.assertAlmostEqual(
        xldate.xldate_from_date_tuple((2005, 2, 23), DATEMODE), 38406.)
    self.assertAlmostEqual(
        xldate.xldate_from_date_tuple((1988, 5, 3), DATEMODE), 32266.)
def get_next_due(cls, stop_date=None, start_date=None):
    """Return all Cells for the IPFs whose "Next Procedure Date" falls
    between start_date and stop_date (inclusive) in the latest worksheet.

    BUGFIX: the previous signature used ``datetime.now()`` directly in the
    default arguments, so both defaults were evaluated ONCE at import time
    and the query window was frozen from then on.  ``None`` sentinels now
    compute the window at call time; explicit arguments behave as before.

    :param stop_date: end of the window (default: ~100 years from now).
    :param start_date: start of the window (default: now).
    :return: queryset of Cells belonging to the due IPF numbers.
    """
    if start_date is None:
        start_date = datetime.now()
    if stop_date is None:
        stop_date = datetime.now() + timedelta(weeks=5200)

    # TODO - This can almost certainly be performed as a single query.
    # Convert both bounds to Excel serial dates (1900 date system) because
    # the 'content' field stores dates in that representation.
    stop = xldate_from_date_tuple(stop_date.timetuple()[0:3], 0)
    start = xldate_from_date_tuple(start_date.timetuple()[0:3], 0)

    latest_ws = WorkSheet.objects.latest('modified_time')
    due_date_cells = (cls.objects.all()
                      .filter(worksheet=latest_ws)
                      .filter(col_header__value="Next Procedure Date")
                      .filter(content__gte=start)
                      .filter(content__lte=stop)
                      .order_by('content'))

    # List of IPF numbers which have a Next Procedure Date between the
    # start and stop.  Should be possible to do this in the query.
    ipf_numbers = [c.ipf_num.value for c in due_date_cells]

    # New query gathering all Cells for the IPF Numbers identified as due
    # between start & stop.
    due_ipfs = (cls.objects.all()
                .filter(worksheet=latest_ws)
                .filter(ipf_num__value__in=ipf_numbers))
    return due_ipfs
def test_xldate_from_date_tuple():
    """Spot-check xldate_from_date_tuple against known Excel serial numbers."""
    expected = {
        (1907, 7, 3): 2741.,
        (2005, 2, 23): 38406.,
        (1988, 5, 3): 32266.,
    }
    for dtuple, serial in expected.items():
        assert xldate.xldate_from_date_tuple(dtuple, DATEMODE) == \
            pytest.approx(serial)
def brfperiod(self):
    """Return the data period as a (start, end) pair of Excel serial dates."""
    start_y, start_m, start_d = self._datastart.date().getDate()
    end_y, end_m, end_d = self._dataend.date().getDate()
    return (xldate_from_date_tuple((start_y, start_m, start_d), 0),
            xldate_from_date_tuple((end_y, end_m, end_d), 0))
def save_graph_layout(self):
    """Save the graph layout in the project hdf5 file."""
    print("Saving the graph layout for well %s..." % self.wldset['Well'],
          end=" ")

    layout = {}
    layout['WLmin'] = self.waterlvl_max.value()
    layout['WLscale'] = self.waterlvl_scale.value()
    layout['RAINscale'] = self.Ptot_scale.value()
    layout['fwidth'] = self.page_setup_win.pageSize[0]
    layout['fheight'] = self.page_setup_win.pageSize[1]
    layout['va_ratio'] = self.page_setup_win.va_ratio
    layout['NZGrid'] = self.NZGridWL_spinBox.value()
    layout['bwidth_indx'] = self.qweather_bin.currentIndex()
    layout['date_labels_pattern'] = self.dateDispFreq_spinBox.value()
    layout['datemode'] = self.time_scale_label.currentText()
    layout['wxdset'] = None if self.wxdset is None else self.wxdset.name

    # The time limits are stored as Excel serial dates clamped to the
    # first day of the selected months.
    start = self.date_start_widget.date()
    layout['TIMEmin'] = xldate_from_date_tuple(
        (start.year(), start.month(), 1), 0)
    end = self.date_end_widget.date()
    layout['TIMEmax'] = xldate_from_date_tuple(
        (end.year(), end.month(), 1), 0)

    layout['WLdatum'] = ('mbgs' if self.datum_widget.currentIndex() == 0
                         else 'masl')

    # ---- Page Setup
    layout['title_on'] = bool(self.page_setup_win.isGraphTitle)
    layout['legend_on'] = bool(self.page_setup_win.isLegend)
    layout['language'] = self.btn_language.language
    layout['trend_line'] = bool(self.page_setup_win.isTrendLine)
    layout['meteo_on'] = bool(self.page_setup_win.is_meteo_on)
    layout['glue_wl_on'] = bool(self.page_setup_win.is_glue_wl_on)
    layout['mrc_wl_on'] = bool(self.page_setup_win.is_mrc_wl_on)
    layout['figframe_lw'] = self.page_setup_win.figframe_lw

    # Save the colors :
    cdb = ColorsReader()
    cdb.load_colors_db()
    layout['colors'] = cdb.RGB

    # Save the layout :
    self.wldset.save_layout(layout)
    msg = 'Layout saved successfully for well %s.' % self.wldset['Well']
    self.ConsoleSignal.emit('<font color=black>%s</font>' % msg)
    print("done")
def save_graph_layout(self):
    """Save the current graph layout to the project hdf5 file."""
    print("Saving the graph layout for well %s..." % self.wldset['Well'],
          end=" ")

    if self.datum_widget.currentIndex() == 0:
        wldatum = 'mbgs'
    else:
        wldatum = 'masl'

    pagesetup = self.page_setup_win
    layout = {
        'WLmin': self.waterlvl_max.value(),
        'WLscale': self.waterlvl_scale.value(),
        'RAINscale': self.Ptot_scale.value(),
        'fwidth': pagesetup.pageSize[0],
        'fheight': pagesetup.pageSize[1],
        'va_ratio': pagesetup.va_ratio,
        'NZGrid': self.NZGridWL_spinBox.value(),
        'bwidth_indx': self.qweather_bin.currentIndex(),
        'date_labels_pattern': self.dateDispFreq_spinBox.value(),
        'datemode': self.time_scale_label.currentText(),
        'wxdset': None if self.wxdset is None else self.wxdset.name,
        # Time limits as Excel serial dates, on the first of the month.
        'TIMEmin': xldate_from_date_tuple(
            (self.date_start_widget.date().year(),
             self.date_start_widget.date().month(), 1), 0),
        'TIMEmax': xldate_from_date_tuple(
            (self.date_end_widget.date().year(),
             self.date_end_widget.date().month(), 1), 0),
        'WLdatum': wldatum,
        # ---- Page Setup
        'title_on': bool(pagesetup.isGraphTitle),
        'legend_on': bool(pagesetup.isLegend),
        'language': self.btn_language.language,
        'trend_line': bool(pagesetup.isTrendLine),
        'meteo_on': bool(pagesetup.is_meteo_on),
        'glue_wl_on': bool(pagesetup.is_glue_wl_on),
        'mrc_wl_on': bool(pagesetup.is_mrc_wl_on),
        'figframe_lw': pagesetup.figframe_lw,
    }

    # Save the colors :
    cdb = ColorsReader()
    cdb.load_colors_db()
    layout['colors'] = cdb.RGB

    # Save the layout :
    self.wldset.save_layout(layout)
    msg = 'Layout saved successfully for well %s.' % self.wldset['Well']
    self.ConsoleSignal.emit('<font color=black>%s</font>' % msg)
    print("done")
def update_graph_layout_parameter(self):
    """Push the current values of the GUI widgets onto the hydrograph."""
    hg = self.hydrograph
    setup = self.page_setup_win

    # language :
    hg.language = self.btn_language.language

    # Scales :
    hg.WLmin = self.waterlvl_max.value()
    hg.WLscale = self.waterlvl_scale.value()
    hg.RAINscale = self.Ptot_scale.value()
    hg.NZGrid = self.NZGridWL_spinBox.value()

    # WL Datum :
    hg.WLdatum = self.datum_widget.currentIndex()

    # Dates : both limits are clamped to the first day of their month.
    hg.datemode = self.time_scale_label.currentText()
    start = self.date_start_widget.date()
    hg.TIMEmin = xldate_from_date_tuple((start.year(), start.month(), 1), 0)
    end = self.date_end_widget.date()
    hg.TIMEmax = xldate_from_date_tuple((end.year(), end.month(), 1), 0)
    hg.date_labels_pattern = self.dateDispFreq_spinBox.value()

    # Page Setup :
    hg.fwidth = setup.pageSize[0]
    hg.fheight = setup.pageSize[1]
    hg.va_ratio = setup.va_ratio
    hg.trend_line = setup.isTrendLine
    hg.isLegend = setup.isLegend
    hg.isGraphTitle = setup.isGraphTitle
    hg.set_meteo_on(setup.is_meteo_on)
    hg.set_glue_wl_on(setup.is_glue_wl_on)
    hg.set_mrc_wl_on(setup.is_mrc_wl_on)
    hg.set_figframe_lw(setup.figframe_lw)

    # Weather bins :
    hg.bwidth_indx = self.qweather_bin.currentIndex()
def update_graph_layout_parameter(self):
    """Mirror the widget values into the hydrograph layout parameters."""
    # language :
    self.hydrograph.language = self.btn_language.language

    # Scales :
    self.hydrograph.WLmin = self.waterlvl_max.value()
    self.hydrograph.WLscale = self.waterlvl_scale.value()
    self.hydrograph.RAINscale = self.Ptot_scale.value()
    self.hydrograph.NZGrid = self.NZGridWL_spinBox.value()

    # WL Datum :
    self.hydrograph.WLdatum = self.datum_widget.currentIndex()

    # Dates : both time limits are set to the first day of the month
    # selected in the corresponding date widget.
    self.hydrograph.datemode = self.time_scale_label.currentText()
    for attr, widget in (('TIMEmin', self.date_start_widget),
                         ('TIMEmax', self.date_end_widget)):
        qdate = widget.date()
        setattr(self.hydrograph, attr,
                xldate_from_date_tuple((qdate.year(), qdate.month(), 1), 0))
    self.hydrograph.date_labels_pattern = self.dateDispFreq_spinBox.value()

    # Page Setup :
    self.hydrograph.fwidth = self.page_setup_win.pageSize[0]
    self.hydrograph.fheight = self.page_setup_win.pageSize[1]
    self.hydrograph.va_ratio = self.page_setup_win.va_ratio
    self.hydrograph.trend_line = self.page_setup_win.isTrendLine
    self.hydrograph.isLegend = self.page_setup_win.isLegend
    self.hydrograph.isGraphTitle = self.page_setup_win.isGraphTitle
    self.hydrograph.set_meteo_on(self.page_setup_win.is_meteo_on)
    self.hydrograph.set_glue_wl_on(self.page_setup_win.is_glue_wl_on)
    self.hydrograph.set_mrc_wl_on(self.page_setup_win.is_mrc_wl_on)
    self.hydrograph.set_figframe_lw(self.page_setup_win.figframe_lw)

    # Weather bins :
    self.hydrograph.bwidth_indx = self.qweather_bin.currentIndex()
def load_weather_log(fname, varname):
    """Build a plottable series of the log entries recorded for *varname*.

    Reads the weather log with ``open_weather_log`` and collects the Excel
    serial date of every row whose first field equals *varname*.  Runs of
    consecutive days are encoded as [nan, start, end] triplets in the
    returned numpy array; the nan entries break the line when plotted so
    each run is drawn as a separate segment.

    NOTE(review): raises IndexError when no row matches *varname* --
    confirm callers guarantee at least one match.
    """
    reader = open_weather_log(fname)

    # Collect the Excel serial date (1900 date system) of every matching row.
    xldates = []
    for i in range(len(reader)):
        if reader[i][0] == varname:
            year = int(float(reader[i][1]))
            month = int(float(reader[i][2]))
            day = int(float(reader[i][3]))
            xldates.append(xldate_from_date_tuple((year, month, day), 0))

    time = []
    # tseg is the segment under construction: [nan, segment start, segment end].
    tseg = [np.nan, xldates[0], xldates[0]+1]
    # NOTE(review): on the first iteration tseg[2] != xldates[0], so the
    # initial one-day stub is flushed immediately and then rebuilt; a run
    # that starts at the very last date is never flushed.  Verify both are
    # intended (they only add/miss one short overlapping plot segment).
    for xldate in xldates:
        if tseg[2] == xldate:
            # This date extends the current run of consecutive days.
            if xldate == xldates[-1]:
                # the last data of the series is missing
                time.extend(tseg)
            else:
                tseg[2] += 1
        else:
            # Gap found: flush the finished segment and start a new one.
            time.extend(tseg)
            tseg[1] = xldate
            tseg[2] = xldate + 1

    # Trailing nan so the final plotted segment is terminated.
    time.append(np.nan)
    time = np.array(time)

    return time
def load_weather_log(fname, varname):
    """Return the dates logged for *varname* as nan-separated segments.

    Every row of the log whose first field is *varname* contributes one
    Excel serial date.  Consecutive dates are grouped and emitted as
    [nan, first, last] triplets so that matplotlib draws each group as its
    own segment.

    NOTE(review): an empty match list makes ``xldates[0]`` raise
    IndexError -- confirm the log always contains *varname*.
    """
    reader = open_weather_log(fname)

    # Excel serial date of each matching log row.
    xldates = []
    for i in range(len(reader)):
        if reader[i][0] == varname:
            year = int(float(reader[i][1]))
            month = int(float(reader[i][2]))
            day = int(float(reader[i][3]))
            xldates.append(xldate_from_date_tuple((year, month, day), 0))

    time = []
    # Segment accumulator: [nan, start date, one-past-last date].
    tseg = [np.nan, xldates[0], xldates[0] + 1]
    # NOTE(review): the first loop iteration always takes the else branch
    # (tseg[2] == xldates[0]+1 != xldates[0]), flushing a one-day stub; and
    # a segment opened by the final date is never flushed.  Confirm intended.
    for xldate in xldates:
        if tseg[2] == xldate:
            # Consecutive day: extend the current segment.
            if xldate == xldates[-1]:
                # the last data of the series is missing
                time.extend(tseg)
            else:
                tseg[2] += 1
        else:
            # Non-consecutive date: emit the segment and open a new one.
            time.extend(tseg)
            tseg[1] = xldate
            tseg[2] = xldate + 1

    # Terminating nan for the last emitted segment.
    time.append(np.nan)
    time = np.array(time)

    return time
def insertNewRows(incomingFiles, workbook):
    # add new files (in rows) into workbook
    #
    # For each incoming file, look up the worksheet matching its filetype,
    # clone the sheet's first row as a template, blank it, fill in the
    # title and today's date (as an Excel serial number), and append it.
    # Files whose filetype has no worksheet are reported and skipped.
    #
    # NOTE: written in Python 2 syntax (print statement, unicode,
    # ``except KeyError, e``).
    TITLE = 0  # column name constants
    CTRLNUMBER = 1
    ENTRY = 2
    DATE = 2  # NOTE(review): ENTRY and DATE share column index 2 -- confirm intended
    ARCHIVED = 4
    MD5 = 5
    NOTES = 6
    DELIMITER = '_'
    FILENAME = 0  # dictionary pair values
    previousAsset = ''  # previous asset to compare incoming file to
    # Process files in sorted order.
    incomingFiles = sorted(incomingFiles)
    for file in incomingFiles:
        try:
            filename = file.filename
            filetype = getFiletype(file)
            # Raises KeyError when no worksheet exists for this filetype.
            worksheet = workbook[filetype]
            newrow = deepcopy(worksheet[0])  # create new row using deepcopy
            print "inserting", filename, "into", filetype
            # Blank every cell inherited from the template row.
            for cell in newrow:
                cell.value = ""
            newrow[TITLE].value = unicode(filename)
            # Today's date as an Excel serial number (1900 date system).
            date = datetime.datetime.today()
            dateVal = xldate.xldate_from_date_tuple(date.timetuple()[0:3], 0)
            newrow[DATE].value = dateVal
            worksheet.append(newrow)
            workbook[filetype] = worksheet
        except KeyError, e:
            # Unknown filetype: report the file and continue with the next one.
            print file.filename, e
def set_time_scale(self):
    """Setup the time scale of the x-axis."""
    if self.datemode.lower() == 'year':
        # Snap the lower time limit to January 1st of its own year.
        start_year = xldate_as_tuple(self.TIMEmin, 0)[0]
        self.TIMEmin = xldate_from_date_tuple((start_year, 1, 1), 0)

        # Unless the upper limit already falls on a January 1st, push it
        # forward to January 1st of the following year.
        end_tuple = xldate_as_tuple(self.TIMEmax, 0)
        if not (end_tuple[1] == 1 and end_tuple[2] == 1):
            self.TIMEmax = xldate_from_date_tuple((end_tuple[0] + 1, 1, 1), 0)

    self.setup_xticklabels()
    self.ax1.axis([self.TIMEmin, self.TIMEmax, 0, self.NZGrid])
def set_time_scale(self):
    """Setup the time scale of the x-axis."""
    if self.datemode.lower() == 'year':
        # Round TIMEmin down to the start of its year.
        self.TIMEmin = xldate_from_date_tuple(
            (xldate_as_tuple(self.TIMEmin, 0)[0], 1, 1), 0)

        # Round TIMEmax up to the start of the next year, unless it is
        # already exactly on a January 1st.
        year, month, day = xldate_as_tuple(self.TIMEmax, 0)[:3]
        on_new_year = (month == 1) and (day == 1)
        if not on_new_year:
            self.TIMEmax = xldate_from_date_tuple((year + 1, 1, 1), 0)

    self.setup_xticklabels()
    self.ax1.axis([self.TIMEmin, self.TIMEmax, 0, self.NZGrid])
def best_fit_time(self, TIME):
    """Fit self.TIMEmin/self.TIMEmax to whole months enclosing TIME.

    Returns the (year, month, day) tuples of the two computed limits.
    """
    # Lower limit: first day of the month of the first sample.
    first = xldate_as_tuple(TIME[0], 0)
    date0 = (first[0], first[1], 1)
    self.TIMEmin = xldate_from_date_tuple(date0, 0)

    # Upper limit: first day of the month following the last sample.
    last = xldate_as_tuple(TIME[-1], 0)
    year, month = last[0], last[1] + 1
    if month > 12:
        year, month = year + 1, 1
    date1 = (year, month, 1)
    self.TIMEmax = xldate_from_date_tuple(date1, 0)

    return date0, date1
def best_fit_time(self, TIME):
    """Snap the time limits to month boundaries around the TIME series."""
    # Start limit: beginning of the month containing the first data point.
    start_tuple = xldate_as_tuple(TIME[0], 0)
    date0 = (start_tuple[0], start_tuple[1], 1)
    self.TIMEmin = xldate_from_date_tuple(date0, 0)

    # End limit: beginning of the month right after the last data point,
    # rolling over to January of the next year when needed.
    end_tuple = xldate_as_tuple(TIME[-1], 0)
    if end_tuple[1] == 12:
        date1 = (end_tuple[0] + 1, 1, 1)
    else:
        date1 = (end_tuple[0], end_tuple[1] + 1, 1)
    self.TIMEmax = xldate_from_date_tuple(date1, 0)

    return date0, date1
def make_xticks_info(self):
    """Compute the x-axis tick positions and labels for the time axis.

    Returns a (xticks_position, xticks_labels_position, xticks_labels)
    tuple.  Positions are Excel serial dates; one label is produced every
    ``self.date_labels_pattern`` ticks, in month or year mode depending on
    ``self.datemode``.
    """
    # ---------------------------------------- horizontal text alignment --

    # The strategy here is to:
    # 1. render some random text ;
    # 2. get the height of its bounding box ;
    # 3. get the horizontal translation of the top-right corner after a
    #    rotation of the bbox of 45 degrees ;
    # 4. sclale the length calculated in step 3 to the height to width
    #    ratio of the axe ;
    # 5. convert the lenght calculated in axes coord. to the data coord.
    #    system ;
    # 6. remove the random text from the figure.

    # Random text bbox height :
    dummytxt = self.ax1.text(0.5, 0.5, 'some_dummy_text', fontsize=10,
                             ha='right', va='top',
                             transform=self.ax1.transAxes)
    renderer = self.canvas.get_renderer()
    bbox = dummytxt.get_window_extent(renderer)
    bbox = bbox.transformed(self.ax1.transAxes.inverted())

    # Horiz. trans. of bbox top-right corner :
    dx = bbox.height * np.sin(np.radians(45))

    # Scale dx to axe dimension :
    bbox = self.ax1.get_window_extent(renderer)  # in pixels
    bbox = bbox.transformed(self.dpi_scale_trans.inverted())  # in inches
    sdx = dx * bbox.height / bbox.width
    sdx *= (self.TIMEmax - self.TIMEmin + 1)
    dummytxt.remove()

    # Transform to data coord :
    n = self.date_labels_pattern
    month_names = LabelDatabase(self.language).month_names

    xticks_labels_offset = sdx
    xticks_labels = []
    xticks_position = [self.TIMEmin]
    xticks_labels_position = []

    if self.datemode.lower() == 'month':
        # One tick per month; label every n-th tick at mid-month, shifted
        # by the offset computed above to compensate the label rotation.
        i = 0
        while xticks_position[i] < self.TIMEmax:
            year = xldate_as_tuple(xticks_position[i], 0)[0]
            month = xldate_as_tuple(xticks_position[i], 0)[1]
            month_range = monthrange(year, month)[1]
            xticks_position.append(xticks_position[i] + month_range)
            if i % n == 0:
                xticks_labels_position.append(
                    xticks_position[i] + 0.5 * month_range +
                    xticks_labels_offset)
                xticks_labels.append("%s '%s" % (month_names[month - 1],
                                                 str(year)[-2:]))
            i += 1
    elif self.datemode.lower() == 'year':
        # One tick per year (January 1st); label every n-th tick at
        # mid-year with the 4-digit year.
        i = 0
        year = xldate_as_tuple(xticks_position[i], 0)[0]
        while xticks_position[i] < self.TIMEmax:
            xticks_position.append(
                xldate_from_date_tuple((year+1, 1, 1), 0))
            year_range = xticks_position[i+1] - xticks_position[i]
            if i % n == 0:
                xticks_labels_position.append(xticks_position[i] +
                                              0.5 * year_range +
                                              xticks_labels_offset)
                xticks_labels.append("%d" % year)
            year += 1
            i += 1

    return xticks_position, xticks_labels_position, xticks_labels
def read_weather_datafile(filename):
    """Read a weather datafile and return its content as a dict.

    The dict holds the header metadata (station name, coordinates,
    elevation, climate identifier) and the daily data columns as numpy
    arrays.  'Rain', 'Snow' and 'PET' remain None when the corresponding
    columns are absent from the file.  Returns None when the file cannot
    be opened.
    """
    df = {'filename': filename,
          'Station Name': '',
          'Latitude': 0,
          'Longitude': 0,
          'Province': '',
          'Elevation': 0,
          'Climate Identifier': '',
          'Year': np.array([]),
          'Month': np.array([]),
          'Day': np.array([]),
          'Time': np.array([]),
          'Tmax': np.array([]),
          'Tavg': np.array([]),
          'Tmin': np.array([]),
          'Ptot': np.array([]),
          'Rain': None,
          'Snow': None,
          'PET': None,
          }

    # Get info from header and grab the data from the file.
    reader = open_weather_datafile(filename)
    if reader is None:  # pragma: no cover
        return
    else:
        # Header rows come first; the row starting with 'Year' holds the
        # column names of the data block, and everything after it is data.
        for i, row in enumerate(reader):
            if len(row) == 0:
                continue
            if row[0] in ['Station Name', 'Province', 'Climate Identifier']:
                df[row[0]] = str(row[1])
            elif row[0] in ['Latitude', 'Longitude', 'Elevation']:
                try:
                    df[row[0]] = float(row[1])
                except ValueError:
                    print('Wrong format for entry "%s".' % row[0])
                    df[row[0]] = 0
            elif row[0] == 'Year':
                istart = i+1
                var = row
                data = np.array(reader[istart:]).astype('float')
                break

    data = clean_endsof_file(data)

    # Mandatory columns: a missing one raises ValueError from var.index().
    df['Year'] = data[:, var.index('Year')].astype(int)
    df['Month'] = data[:, var.index('Month')].astype(int)
    df['Day'] = data[:, var.index('Day')].astype(int)

    df['Tmax'] = data[:, var.index('Max Temp (deg C)')].astype(float)
    df['Tmin'] = data[:, var.index('Min Temp (deg C)')].astype(float)
    df['Tavg'] = data[:, var.index('Mean Temp (deg C)')].astype(float)
    df['Ptot'] = data[:, var.index('Total Precip (mm)')].astype(float)

    try:
        df['Time'] = data[:, var.index('Time')]
    except ValueError:
        # The time is not saved in the datafile. We need to calculate it from
        # the Year, Month, and Day arrays (Excel serial dates, 1900 system).
        df['Time'] = np.zeros(len(df['Year']))
        for i in range(len(df['Year'])):
            dtuple = (df['Year'][i], df['Month'][i], df['Day'][i])
            df['Time'][i] = xldate_from_date_tuple(dtuple, 0)

    # Optional columns: imported only when present in the datafile.
    try:
        df['PET'] = data[:, var.index('ETP (mm)')]
        print('Potential evapotranspiration imported from datafile.')
    except ValueError:
        pass

    try:
        df['Rain'] = data[:, var.index('Rain (mm)')]
        print('Rain data imported from datafile.')
    except ValueError:
        pass

    try:
        df['Snow'] = data[:, var.index('Snow (mm)')]
        print('Snow data imported from datafile.')
    except ValueError:
        pass

    return df
def make_xticks_info(self):
    """Return the tick positions and labels for the time (x) axis.

    Produces (xticks_position, xticks_labels_position, xticks_labels):
    tick positions in Excel serial dates, one label every
    ``self.date_labels_pattern`` ticks, with month or year granularity
    selected by ``self.datemode``.
    """
    # ---------------------------------------- horizontal text alignment --

    # The strategy here is to:
    # 1. render some random text ;
    # 2. get the height of its bounding box ;
    # 3. get the horizontal translation of the top-right corner after a
    #    rotation of the bbox of 45 degrees ;
    # 4. sclale the length calculated in step 3 to the height to width
    #    ratio of the axe ;
    # 5. convert the lenght calculated in axes coord. to the data coord.
    #    system ;
    # 6. remove the random text from the figure.

    # Random text bbox height :
    dummytxt = self.ax1.text(0.5, 0.5, 'some_dummy_text', fontsize=10,
                             ha='right', va='top',
                             transform=self.ax1.transAxes)
    renderer = self.canvas.get_renderer()
    bbox = dummytxt.get_window_extent(renderer)
    bbox = bbox.transformed(self.ax1.transAxes.inverted())

    # Horiz. trans. of bbox top-right corner :
    dx = bbox.height * np.sin(np.radians(45))

    # Scale dx to axe dimension :
    bbox = self.ax1.get_window_extent(renderer)  # in pixels
    bbox = bbox.transformed(self.dpi_scale_trans.inverted())  # in inches
    sdx = dx * bbox.height / bbox.width
    sdx *= (self.TIMEmax - self.TIMEmin + 1)
    dummytxt.remove()

    # Transform to data coord :
    n = self.date_labels_pattern
    month_names = LabelDatabase(self.language).month_names

    xticks_labels_offset = sdx
    xticks_labels = []
    xticks_position = [self.TIMEmin]
    xticks_labels_position = []

    if self.datemode.lower() == 'month':
        # Month mode: a tick at every month boundary; every n-th tick gets
        # a "Mon 'yy" label centered on the month and shifted by the
        # rotation-compensation offset.
        i = 0
        while xticks_position[i] < self.TIMEmax:
            year = xldate_as_tuple(xticks_position[i], 0)[0]
            month = xldate_as_tuple(xticks_position[i], 0)[1]
            month_range = monthrange(year, month)[1]
            xticks_position.append(xticks_position[i] + month_range)
            if i % n == 0:
                xticks_labels_position.append(
                    xticks_position[i] + 0.5 * month_range +
                    xticks_labels_offset)
                xticks_labels.append("%s '%s" % (month_names[month - 1],
                                                 str(year)[-2:]))
            i += 1
    elif self.datemode.lower() == 'year':
        # Year mode: a tick at every January 1st; every n-th tick gets the
        # year number centered on the year span.
        i = 0
        year = xldate_as_tuple(xticks_position[i], 0)[0]
        while xticks_position[i] < self.TIMEmax:
            xticks_position.append(
                xldate_from_date_tuple((year+1, 1, 1), 0))
            year_range = xticks_position[i+1] - xticks_position[i]
            if i % n == 0:
                xticks_labels_position.append(xticks_position[i] +
                                              0.5 * year_range +
                                              xticks_labels_offset)
                xticks_labels.append("%d" % year)
            year += 1
            i += 1

    return xticks_position, xticks_labels_position, xticks_labels
def read_weather_datafile(filename):
    """Parse a weather datafile into a dict of metadata and daily arrays.

    Header metadata (station name, coordinates, elevation, climate
    identifier) is read first; the data block starts after the row whose
    first field is 'Year'.  Optional columns ('Rain', 'Snow', 'PET') are
    left as None when absent.  Returns None when the file cannot be opened.
    """
    df = {
        'filename': filename,
        'Station Name': '',
        'Latitude': 0,
        'Longitude': 0,
        'Province': '',
        'Elevation': 0,
        'Climate Identifier': '',
        'Year': np.array([]),
        'Month': np.array([]),
        'Day': np.array([]),
        'Time': np.array([]),
        'Tmax': np.array([]),
        'Tavg': np.array([]),
        'Tmin': np.array([]),
        'Ptot': np.array([]),
        'Rain': None,
        'Snow': None,
        'PET': None,
        }

    # Get info from header and grab the data from the file.
    reader = open_weather_datafile(filename)
    if reader is None:  # pragma: no cover
        return
    else:
        # The 'Year' row doubles as the list of data column names; rows
        # after it form the numeric data block.
        for i, row in enumerate(reader):
            if len(row) == 0:
                continue
            if row[0] in ['Station Name', 'Province', 'Climate Identifier']:
                df[row[0]] = str(row[1])
            elif row[0] in ['Latitude', 'Longitude', 'Elevation']:
                try:
                    df[row[0]] = float(row[1])
                except ValueError:
                    print('Wrong format for entry "%s".' % row[0])
                    df[row[0]] = 0
            elif row[0] == 'Year':
                istart = i + 1
                var = row
                data = np.array(reader[istart:]).astype('float')
                break

    data = clean_endsof_file(data)

    # Mandatory data columns (var.index raises ValueError when missing).
    df['Year'] = data[:, var.index('Year')].astype(int)
    df['Month'] = data[:, var.index('Month')].astype(int)
    df['Day'] = data[:, var.index('Day')].astype(int)

    df['Tmax'] = data[:, var.index('Max Temp (deg C)')].astype(float)
    df['Tmin'] = data[:, var.index('Min Temp (deg C)')].astype(float)
    df['Tavg'] = data[:, var.index('Mean Temp (deg C)')].astype(float)
    df['Ptot'] = data[:, var.index('Total Precip (mm)')].astype(float)

    try:
        df['Time'] = data[:, var.index('Time')]
    except ValueError:
        # The time is not saved in the datafile. We need to calculate it from
        # the Year, Month, and Day arrays (as Excel serial dates).
        df['Time'] = np.zeros(len(df['Year']))
        for i in range(len(df['Year'])):
            dtuple = (df['Year'][i], df['Month'][i], df['Day'][i])
            df['Time'][i] = xldate_from_date_tuple(dtuple, 0)

    # Optional data columns:
    try:
        df['PET'] = data[:, var.index('ETP (mm)')]
        print('Potential evapotranspiration imported from datafile.')
    except ValueError:
        pass

    try:
        df['Rain'] = data[:, var.index('Rain (mm)')]
        print('Rain data imported from datafile.')
    except ValueError:
        pass

    try:
        df['Snow'] = data[:, var.index('Snow (mm)')]
        print('Snow data imported from datafile.')
    except ValueError:
        pass

    return df
def encode(self, data_source, **attr):
    """
    Export data as a Microsoft Excel spreadsheet

    @param data_source: the source of the data that is to be encoded
                        as a spreadsheet. This may be:
                        resource: the resource
                        item:     a list of pre-fetched values
                                  the headings are in the first row
                                  the data types are in the second row
    @param attr: dictionary of parameters:
         * title:          The main title of the report
         * list_fields:    Fields to include in list views
         * report_groupby: Used to create a grouping of the result:
                           either a Field object of the resource
                           or a string which matches a value in the heading
         * use_colour:     True to add colour to the cells. default False

    NOTE: written in Python 2 syntax (``print >>``, ``unicode``).
    """
    import datetime
    # Both xlwt (writer) and xlrd (date serial helpers) are imported
    # lazily; on failure the user is redirected with an error message.
    try:
        import xlwt
    except ImportError:
        current.session.error = self.ERROR.XLWT_ERROR
        redirect(URL(extension=""))
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        current.session.error = self.ERROR.XLRD_ERROR
        redirect(URL(extension=""))

    # The xlwt library supports a maximum of 182 character in a single cell
    max_cell_size = 182

    # Get the attributes
    title = attr.get("title")
    list_fields = attr.get("list_fields")
    report_groupby = attr.get("report_groupby")
    use_colour = attr.get("use_colour", False)

    # Extract the data from the data_source
    if isinstance(data_source, (list, tuple)):
        headers = data_source[0]
        types = data_source[1]
        items = data_source[2:]
    else:
        (title, types, headers, items) = self.extractResource(data_source,
                                                              list_fields,
                                                              report_groupby)

    # NOTE(review): this compares the number of headers to the number of
    # *rows* -- confirm whether len(items[0]) was intended.
    if len(headers) != len(items):
        import sys
        print >> sys.stderr, "modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
        print >> sys.stderr, list_fields

    # Resolve the label used to recognise the group-by column.
    if report_groupby != None:
        if isinstance(report_groupby, Field):
            groupby_label = report_groupby.label
        else:
            groupby_label = report_groupby

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = S3XLS.dt_format_translate(settings.get_L10n_date_format())
    time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
    datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())

    # Initialize output
    output = StringIO()

    # Create the workbook and a sheet in it
    book = xlwt.Workbook(encoding="utf-8")
    # The spreadsheet doesn't like a / in the sheet name, so replace any with a space
    sheet1 = book.add_sheet(str(title.replace("/"," ")))

    # Styles
    styleLargeHeader = xlwt.XFStyle()
    styleLargeHeader.font.bold = True
    styleLargeHeader.font.height = 400
    if use_colour:
        styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
        styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
        styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR

    styleNotes = xlwt.XFStyle()
    styleNotes.font.italic = True
    styleNotes.font.height = 160  # 160 Twips = 8point
    styleNotes.num_format_str = datetime_format

    styleHeader = xlwt.XFStyle()
    styleHeader.font.bold = True
    styleHeader.num_format_str = datetime_format
    if use_colour:
        styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR

    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    if use_colour:
        styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR

    # Alternating row colours for readability.
    styleOdd = xlwt.XFStyle()
    if use_colour:
        styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
        styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]

    styleEven = xlwt.XFStyle()
    if use_colour:
        styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
        styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]

    # Initialize counters
    rowCnt = 0
    colCnt = 0

    # Environment
    request = current.request

    # Title row (merged across all visible columns).
    totalCols = len(headers)-1
    if report_groupby != None:
        totalCols -= 1
    if totalCols > 0:
        sheet1.write_merge(rowCnt, rowCnt, 0, totalCols, str(title),
                           styleLargeHeader)
    currentRow = sheet1.row(rowCnt)
    currentRow.height = 440
    rowCnt += 1
    currentRow = sheet1.row(rowCnt)
    # Timestamp of the export in the last column of the second row.
    currentRow.write(totalCols, request.now, styleNotes)
    rowCnt += 1
    currentRow = sheet1.row(rowCnt)

    # Header row: one column per field label, skipping the group-by column.
    fieldWidth=[]
    for label in headers:
        if report_groupby != None:
            if label == groupby_label:
                continue
        currentRow.write(colCnt, str(label), styleHeader)
        width = len(label) * S3XLS.COL_WIDTH_MULTIPLIER
        fieldWidth.append(width)
        sheet1.col(colCnt).width = width
        colCnt += 1

    # fix the size of the last column to display the date
    if 16 * S3XLS.COL_WIDTH_MULTIPLIER > width:
        sheet1.col(totalCols).width = 16 * S3XLS.COL_WIDTH_MULTIPLIER

    subheading = None
    for item in items:
        # Item details
        rowCnt += 1
        currentRow = sheet1.row(rowCnt)
        colCnt = 0
        if rowCnt % 2 == 0:
            style = styleEven
        else:
            style = styleOdd
        for represent in item:
            label = headers[colCnt]
            if type(represent) is not str:
                represent = unicode(represent)
            # Truncate to the maximum cell size supported by xlwt.
            if len(represent) > max_cell_size:
                represent = represent[:max_cell_size]
            # Strip away markup from representation
            try:
                markup = etree.XML(str(represent))
                text = markup.xpath(".//text()")
                if text:
                    text = " ".join(text)
                else:
                    text = ""
                represent = text
            except:
                pass
            # Group-by column: emit a merged sub-heading row whenever the
            # group value changes, and skip writing the cell itself.
            if report_groupby != None:
                if label == groupby_label:
                    if subheading != represent:
                        subheading = represent
                        sheet1.write_merge(rowCnt, rowCnt, 0, totalCols,
                                           represent, styleSubHeader)
                        rowCnt += 1
                        currentRow = sheet1.row(rowCnt)
                        if rowCnt % 2 == 0:
                            style = styleEven
                        else:
                            style = styleOdd
                    continue
            # Convert typed values so Excel stores them natively; on any
            # parsing failure the raw string is written instead.
            coltype=types[colCnt]
            value = represent
            if coltype == "date":
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                # NOTE(review): parsed with the *date* format string, not
                # the datetime format -- confirm intended.
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second,
                                  )
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                # NOTE(review): also parsed with the date format string --
                # confirm intended.
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second,
                                  )
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            currentRow.write(colCnt, value, style)
            # Widen the column when this cell is the longest seen so far.
            width = len(represent) * S3XLS.COL_WIDTH_MULTIPLIER
            if width > fieldWidth[colCnt]:
                fieldWidth[colCnt] = width
                sheet1.col(colCnt).width = width
            colCnt += 1

    # Freeze the title/header rows so they stay visible when scrolling.
    sheet1.panes_frozen = True
    sheet1.horz_split_pos = 3

    book.save(output)

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, str(title))
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    output.seek(0)
    return output.read()
def plot_rmse_vs_time(Ymes, Ypre, Time, Date, name):
    """Plot the absolute estimation error and monthly RMSE on a polar axis.

    :param Ymes: measured values (1D array).
    :param Ypre: predicted values (1D array, same length as Ymes).
    :param Time: day-of-year times (1D numpy array); left unmodified.
    :param Date: sequence whose second element (Date[1]) holds the month
                 number (1-12) of every sample.
    :param name: basename of the output file; the figure is saved as
                 ``name + '_polar_error.pdf'``.
    """
    fw, fh = 6, 6
    fig = mpl.figure.Figure(figsize=(fw, fh), facecolor='white')
    canvas = FigureCanvas(fig)

    # ---- Create Axes

    leftMargin = 0.75 / fw
    rightMargin = 0.75 / fw
    bottomMargin = 0.75 / fh
    topMargin = 0.75 / fh

    x0, y0 = leftMargin, bottomMargin
    w0 = 1 - (leftMargin + rightMargin)
    h0 = 1 - (bottomMargin + topMargin)

    ax0 = fig.add_axes([x0, y0, w0, h0], polar=True)

    # ---- Plot Data

    # Estimation Error
    Yerr = np.abs(Ypre - Ymes)
    # BUGFIX: the previous code did ``Time *= 2 * np.pi / 365.``, mutating
    # the caller's array in place.  Work on a converted copy instead.
    theta = Time * 2 * np.pi / 365.

    c = '0.4'
    ax0.plot(theta, Yerr, '.', mec=c, mfc=c, ms=15, alpha=0.5)

    # RMSE Polygon
    Months = Date[1]
    RMSE = np.zeros(12)
    mfd = np.zeros(12)
    for m in range(12):
        # First day of month m+1, in days since January 1st.
        mfd[m] = (xldate_from_date_tuple((2000, m+1, 1), 0) -
                  xldate_from_date_tuple((2000, 1, 1), 0))
        indx = np.where(Months == m+1)[0]
        RMSE[m] = (np.mean(Yerr[indx] ** 2)) ** 0.5

    # Transform first day of the month to radians
    mfd = mfd * 2 * np.pi / 365.

    # Add first point at the end to close the polygon
    mfd = np.append(mfd, mfd[0])
    RMSE = np.append(RMSE, RMSE[0])

    ax0.plot(mfd, RMSE * 5, ls='--', c='red', lw=2, mec='b', mew=3,
             mfc='b', ms=10, dash_capstyle='round', dash_joinstyle='round')

    # ---- Labels

    ax0.tick_params(axis='both', direction='out', labelsize=16)
    ax0.set_xticklabels(['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
                         'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'])
    ax0.set_xticks(mfd)

    ax0.set_yticklabels([])
    ax0.set_yticks([])
    ax0.set_rmax(1.1 * np.max(Yerr))
    # ax0.set_rgrids([10,20,30,40,50,60,70,80,90], angle=345.)

    # ---- Draw

    fig.savefig(name + '_polar_error.pdf')
    canvas.show()
def encode(self, data_source, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param data_source: the source of the data that is to be encoded
                            as a spreadsheet. This may be:
                            resource: the resource
                            item: a list of pre-fetched values
                                  the headings are in the first row
                                  the data types are in the second row
        @param attr: dictionary of parameters:
             * title: The main title of the report
             * list_fields: Fields to include in list views
             * report_groupby: Used to create a grouping of the result:
                               either a Field object of the resource
                               or a string which matches a value in
                               the heading
             * use_colour: True to add colour to the cells. default False
    """

    import datetime

    # xlwt writes the workbook, xlrd encodes dates/times as XLS serials
    try:
        import xlwt
    except ImportError:
        current.session.error = self.ERROR.XLWT_ERROR
        redirect(URL(extension=""))
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        current.session.error = self.ERROR.XLRD_ERROR
        redirect(URL(extension=""))

    request = current.request

    # The xlwt library supports a maximum of 182 characters in a single cell
    max_cell_size = 182

    COL_WIDTH_MULTIPLIER = S3XLS.COL_WIDTH_MULTIPLIER

    # Get the attributes
    title = attr.get("title")
    list_fields = attr.get("list_fields")
    group = attr.get("dt_group")
    report_groupby = list_fields[group] if group else None
    use_colour = attr.get("use_colour", False)

    # Extract the data from the data_source
    if isinstance(data_source, (list, tuple)):
        headers = data_source[0]
        types = data_source[1]
        items = data_source[2:]
        # FIX: lfields was never assigned in this branch, so the header
        # loop below ("for selector in lfields") raised a NameError for
        # pre-fetched lists; for those, the selectors are list_fields
        # (the later variant of this method does the same).
        lfields = list_fields
    else:
        (title, types, lfields, headers, items) = \
            self.extractResource(data_source, list_fields)
        report_groupby = lfields[group] if group else None

    # Sanity-check: every data row must have as many items as headers
    if len(items) > 0 and len(headers) != len(items[0]):
        from ..s3utils import s3_debug
        msg = """modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(headers), len(items[0]),
                     headers, list_fields)
        s3_debug(msg)

    groupby_label = headers[report_groupby] if report_groupby else None

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = S3XLS.dt_format_translate(settings.get_L10n_date_format())
    time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
    datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())

    # Create the workbook
    book = xlwt.Workbook(encoding="utf-8")

    # Add a sheet
    # Can't have a / in the sheet_name, so replace any with a space
    sheet_name = str(title.replace("/", " "))
    # sheet_name cannot be over 31 chars
    if len(sheet_name) > 31:
        sheet_name = sheet_name[:31]
    sheet1 = book.add_sheet(sheet_name)

    # Styles
    styleLargeHeader = xlwt.XFStyle()
    styleLargeHeader.font.bold = True
    styleLargeHeader.font.height = 400
    if use_colour:
        styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
        styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
        styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR

    styleNotes = xlwt.XFStyle()
    styleNotes.font.italic = True
    styleNotes.font.height = 160 # 160 Twips = 8 point
    styleNotes.num_format_str = datetime_format

    styleHeader = xlwt.XFStyle()
    styleHeader.font.bold = True
    styleHeader.num_format_str = datetime_format
    if use_colour:
        styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR

    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    if use_colour:
        styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR

    styleOdd = xlwt.XFStyle()
    if use_colour:
        styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
        styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]

    styleEven = xlwt.XFStyle()
    if use_colour:
        styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
        styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]

    # Header row (row 2; rows 0-1 hold the title block written below)
    colCnt = -1
    headerRow = sheet1.row(2)
    fieldWidth = []
    for selector in lfields:
        if selector == report_groupby:
            continue
        label = headers[selector]
        if label == "Id":
            fieldWidth.append(0)
            continue
        if label == "Sort":
            continue
        colCnt += 1
        headerRow.write(colCnt, str(label), styleHeader)
        width = min(len(label) * COL_WIDTH_MULTIPLIER, 2000)
        fieldWidth.append(width)
        sheet1.col(colCnt).width = width

    # Title row
    currentRow = sheet1.row(0)
    if colCnt > 0:
        sheet1.write_merge(0, 0, 0, colCnt, str(title), styleLargeHeader)
    currentRow.height = 500
    currentRow = sheet1.row(1)
    currentRow.write(0, str(current.T("Date Exported:")), styleNotes)
    currentRow.write(1, request.now, styleNotes)
    # Fix the size of the last column to display the date
    if 16 * COL_WIDTH_MULTIPLIER > width:
        sheet1.col(colCnt).width = 16 * COL_WIDTH_MULTIPLIER

    # Initialize counters
    totalCols = colCnt
    rowCnt = 2
    colCnt = 0
    subheading = None
    for item in items:
        # Item details
        rowCnt += 1
        currentRow = sheet1.row(rowCnt)
        colCnt = 0
        # Alternate row style for "stripes"
        if rowCnt % 2 == 0:
            style = styleEven
        else:
            style = styleOdd
        if report_groupby:
            represent = s3_strip_markup(s3_unicode(item[report_groupby]))
            if subheading != represent:
                # Start of a new group - insert a merged subheading row
                subheading = represent
                sheet1.write_merge(rowCnt, rowCnt, 0, totalCols,
                                   subheading, styleSubHeader)
                rowCnt += 1
                currentRow = sheet1.row(rowCnt)
                if rowCnt % 2 == 0:
                    style = styleEven
                else:
                    style = styleOdd
        for field in lfields:
            label = headers[field]
            if label == groupby_label:
                continue
            if label == "Id":
                # Skip the ID column from XLS exports
                colCnt += 1
                continue
            represent = s3_strip_markup(s3_unicode(item[field]))
            coltype = types[colCnt]
            if coltype == "sort":
                continue
            if len(represent) > max_cell_size:
                represent = represent[:max_cell_size]
            value = represent
            # Convert typed columns to native XLS values with a matching
            # number format; on any parse failure keep the string as-is
            if coltype == "date":
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            # NOTE(review): colCnt - 1 offsets for the skipped Id column;
            # this assumes an Id column is present - confirm for lists
            # without one.
            currentRow.write(colCnt - 1, value, style)
            # Widen the column if this cell needs more space
            width = len(represent) * COL_WIDTH_MULTIPLIER
            if width > fieldWidth[colCnt]:
                fieldWidth[colCnt] = width
                sheet1.col(colCnt - 1).width = width
            colCnt += 1

    # Freeze the title + header rows
    sheet1.panes_frozen = True
    sheet1.horz_split_pos = 3

    output = StringIO()
    book.save(output)

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, str(title))
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    output.seek(0)
    return output.read()
def encode(self, data_source, title=None, as_stream=False, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param data_source: the source of the data that is to be encoded
                            as a spreadsheet, can be either of:
                            1) an S3Resource
                            2) an array of value dicts (dict of
                               column labels as first item, list of
                               field types as second item)
                            3) a dict like:
                               {columns: [key, ...],
                                headers: {key: label},
                                types: {key: type},
                                rows: [{key:value}],
                                }

        @param title: the title for the output document
        @param as_stream: return the buffer (StringIO) rather than
                          its contents (str), useful when the output
                          is supposed to be stored locally

        @param attr: keyword parameters

        @keyword title: the main title of the report
        @keyword list_fields: fields to include in list views
        @keyword report_groupby: used to create a grouping of the result:
                                 either a Field object of the resource
                                 or a string which matches a value in
                                 the heading
        @keyword use_colour: True to add colour to the cells,
                             default False
        @keyword evenodd: render different background colours
                          for even/odd rows ("stripes")
    """

    # Do not redirect from here!
    # ...but raise proper status code, which can be caught by caller
    try:
        import xlwt
    except ImportError:
        error = self.ERROR.XLWT_ERROR
        current.log.error(error)
        raise HTTP(503, body=error)
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        error = self.ERROR.XLRD_ERROR
        current.log.error(error)
        raise HTTP(503, body=error)

    import datetime

    MAX_CELL_SIZE = self.MAX_CELL_SIZE
    COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER

    # Get the attributes
    title = attr.get("title")
    if title is None:
        title = current.T("Report")
    list_fields = attr.get("list_fields")
    group = attr.get("dt_group")
    use_colour = attr.get("use_colour", False)
    evenodd = attr.get("evenodd", True)

    # Extract the data from the data_source
    # (three accepted shapes, see docstring)
    if isinstance(data_source, dict):
        headers = data_source.get("headers", {})
        lfields = data_source.get("columns", list_fields)
        column_types = data_source.get("types")
        types = [column_types[col] for col in lfields]
        rows = data_source.get("rows")
    elif isinstance(data_source, (list, tuple)):
        headers = data_source[0]
        types = data_source[1]
        rows = data_source[2:]
        lfields = list_fields
    else:
        if not list_fields:
            list_fields = data_source.list_fields()
        (title, types, lfields, headers, rows) = self.extract(data_source,
                                                              list_fields,
                                                              )

    # Verify columns in items
    request = current.request
    if len(rows) > 0 and len(lfields) > len(rows[0]):
        msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
        current.log.error(msg)

    # Grouping
    report_groupby = lfields[group] if group else None
    groupby_label = headers[report_groupby] if report_groupby else None

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = settings.get_L10n_date_format()
    date_format_str = str(date_format)

    dt_format_translate = self.dt_format_translate
    date_format = dt_format_translate(date_format)
    time_format = dt_format_translate(settings.get_L10n_time_format())
    datetime_format = dt_format_translate(settings.get_L10n_datetime_format())

    title_row = settings.get_xls_title_row()

    # Get styles
    styles = self._styles(use_colour = use_colour,
                          evenodd = evenodd,
                          datetime_format = datetime_format,
                          )

    # Create the workbook
    book = xlwt.Workbook(encoding="utf-8")

    # Add sheets
    sheets = []
    # XLS exports are limited to 65536 rows per sheet, we bypass
    # this by creating multiple sheets
    # NOTE(review): len(rows) / row_limit relies on integer division -
    # confirm behaviour before porting to Python 3
    row_limit = 65536
    sheetnum = len(rows) / row_limit
    # Can't have a / in the sheet_name, so replace any with a space
    sheet_name = str(title.replace("/", " "))
    if len(sheet_name) > 31:
        # Sheet name cannot be over 31 chars
        # (take sheet number suffix into account)
        sheet_name = sheet_name[:31] if sheetnum == 1 else sheet_name[:28]
    count = 1
    while len(sheets) <= sheetnum:
        sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
        count += 1

    if callable(title_row):
        # Calling with sheet None to get the number of title rows
        title_row_length = title_row(None)
    else:
        title_row_length = 2

    # Add header row to all sheets, determine columns widths
    header_style = styles["header"]
    for sheet in sheets:
        # Move this down if a title row will be added
        if title_row:
            header_row = sheet.row(title_row_length)
        else:
            header_row = sheet.row(0)
        column_widths = []
        has_id = False
        col_index = 0
        for selector in lfields:
            if selector == report_groupby:
                continue
            label = headers[selector]
            if label == "Id":
                # Indicate to adjust col_index when writing out
                has_id = True
                column_widths.append(0)
                col_index += 1
                continue
            if label == "Sort":
                continue
            if has_id:
                # Adjust for the skipped column
                write_col_index = col_index - 1
            else:
                write_col_index = col_index
            header_row.write(write_col_index, str(label), header_style)
            width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
            width = min(width, 65535) # USHRT_MAX
            column_widths.append(width)
            sheet.col(write_col_index).width = width
            col_index += 1

    title = s3_str(title)

    # Title row (optional, deployment setting)
    if title_row:
        T = current.T
        large_header_style = styles["large_header"]
        notes_style = styles["notes"]
        for sheet in sheets:
            if callable(title_row):
                # Custom title rows
                title_row(sheet)
            else:
                # First row => Title (standard = "title_list" CRUD string)
                current_row = sheet.row(0)
                if col_index > 0:
                    sheet.write_merge(0, 0, 0, col_index, title,
                                      large_header_style,
                                      )
                current_row.height = 500
                # Second row => Export date/time
                current_row = sheet.row(1)
                current_row.write(0, "%s:" % T("Date Exported"), notes_style)
                current_row.write(1, request.now, notes_style)
                # Fix the size of the last column to display the date
                if 16 * COL_WIDTH_MULTIPLIER > width:
                    sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER

    # Initialize counters
    totalCols = col_index
    # Move the rows down if a title row is included
    if title_row:
        row_index = title_row_length
    else:
        row_index = 0

    # Helper function to get the current row
    # (maps a running row count onto the correct sheet + local row)
    def get_current_row(row_count, row_limit):
        sheet_count = int(row_count / row_limit)
        row_number = row_count - (sheet_count * row_limit)
        if sheet_count > 0:
            row_number += 1
        return sheets[sheet_count], sheets[sheet_count].row(row_number)

    # Write the table contents
    subheading = None
    odd_style = styles["odd"]
    even_style = styles["even"]
    subheader_style = styles["subheader"]
    for row in rows:
        # Current row
        row_index += 1
        current_sheet, current_row = get_current_row(row_index, row_limit)
        style = even_style if row_index % 2 == 0 else odd_style

        # Group headers
        if report_groupby:
            represent = s3_strip_markup(s3_unicode(row[report_groupby]))
            if subheading != represent:
                # Start of new group - write group header
                subheading = represent
                current_sheet.write_merge(row_index, row_index, 0, totalCols,
                                          subheading,
                                          subheader_style,
                                          )
                # Move on to next row
                row_index += 1
                current_sheet, current_row = get_current_row(row_index, row_limit)
                style = even_style if row_index % 2 == 0 else odd_style

        col_index = 0
        remaining_fields = lfields

        # Custom row style?
        row_style = None
        if "_style" in row:
            stylename = row["_style"]
            if stylename in styles:
                row_style = styles[stylename]

        # Group header/footer row?
        if "_group" in row:
            group_info = row["_group"]
            label = group_info.get("label")
            totals = group_info.get("totals")
            if label:
                label = s3_strip_markup(s3_unicode(label))
                style = row_style or subheader_style
                span = group_info.get("span")
                if span == 0:
                    # Label spans the full row width
                    current_sheet.write_merge(row_index,
                                              row_index,
                                              0,
                                              totalCols - 1,
                                              label,
                                              style,
                                              )
                    if totals:
                        # Write totals into the next row
                        row_index += 1
                        current_sheet, current_row = \
                            get_current_row(row_index, row_limit)
                else:
                    # Label spans the first <span> columns, totals follow
                    current_sheet.write_merge(row_index,
                                              row_index,
                                              0,
                                              span - 1,
                                              label,
                                              style,
                                              )
                    col_index = span
                    remaining_fields = lfields[span:]
            if not totals:
                continue

        for field in remaining_fields:
            label = headers[field]
            if label == groupby_label:
                continue
            if label == "Id":
                # Skip the ID column from XLS exports
                col_index += 1
                continue
            if field not in row:
                represent = ""
            else:
                represent = s3_strip_markup(s3_unicode(row[field]))
            coltype = types[col_index]
            if coltype == "sort":
                continue
            if len(represent) > MAX_CELL_SIZE:
                represent = represent[:MAX_CELL_SIZE]
            value = represent
            # Convert typed columns to native XLS values with a matching
            # number format; on any parse failure keep the string as-is
            if coltype == "date":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            if has_id:
                # Adjust for the skipped column
                write_col_index = col_index - 1
            else:
                write_col_index = col_index
            current_row.write(write_col_index, value, style)
            # Widen the column if this cell needs more space
            width = len(represent) * COL_WIDTH_MULTIPLIER
            if width > column_widths[col_index]:
                column_widths[col_index] = width
                current_sheet.col(write_col_index).width = width
            col_index += 1

    # Additional sheet settings
    for sheet in sheets:
        sheet.panes_frozen = True
        sheet.horz_split_pos = 1

    # Write output
    output = StringIO()
    book.save(output)
    output.seek(0)

    if as_stream:
        return output

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, title)
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    return output.read()
def load_err_file(self):  #=============================== Load err. File ==
    """
    Load and parse a tab-separated .err file.

    Populates: self.staName, self.climID (station header fields),
    self.varNames, self.varTypes, and the per-variable series
    self.Yp, self.Ym, self.Time, self.Date.
    Returns early (with a message) if the header cannot be located
    within the first 25 rows.
    """
    with open(self.fname) as f:
        reader = list(csv.reader(f, delimiter='\t'))

    #---- Finds Station Info + First Row of Data ----

    # Scan the header rows for station metadata until the 'VARIABLE'
    # marker, which precedes the data section.
    row = 0
    while True:
        if row > 25:
            print('Something is wrong with the ' +
                  'formatting of the .err file')
            return
        try:
            if reader[row][0] == 'VARIABLE':
                break
            elif reader[row][0] == 'Station Name':
                self.staName = reader[row][1]
            elif reader[row][0] == 'Climate Identifier':
                self.climID = reader[row][1]
        except IndexError:
            # Blank/short lines are skipped
            pass
        row += 1
    row += 1

    #------------------------------------------------- Re-Organizes Data --

    # Get unique weather variable names
    DATA = np.array(reader[row:])
    self.varNames = np.unique(DATA[:, 0])
    self.varTypes = ['continuous'] * (len(self.varNames))

    # Splits data according to the weather variables found.
    # NOTE(review): columns 1-3 are year/month/day; columns 7 and 8
    # presumably hold predicted (Yp) and measured (Ym) values - confirm
    # against the .err file writer.
    self.Yp, self.Ym, self.Time, self.Date = [], [], [], []
    for i, var in enumerate(self.varNames):
        indx = np.where(DATA[:, 0] == var)[0]

        self.Yp.append(DATA[indx, 7].astype(float))
        self.Ym.append(DATA[indx, 8].astype(float))

        y = DATA[indx, 1].astype(int)
        m = DATA[indx, 2].astype(int)
        d = DATA[indx, 3].astype(int)

        #---- Time ----

        # Day-of-year of each sample, via differences of Excel serial dates
        t = np.zeros(len(y))
        for date in range(len(y)):
            t[date] = (xldate_from_date_tuple((y[date], m[date], d[date]), 0) -
                       xldate_from_date_tuple((y[date], 1, 1), 0))
        self.Time.append(t)
        self.Date.append([y, m, d])

        #---- Weather Variable Type ----

        # If the proportion of zeros in the data series is higher
        # than 25%, the data type is set as an event-based weather
        # variable. Otherwise, default value is kept and variable is
        # considered to be continuous in time.
        #
        # The precipitation (solid, liquid or total) is a good example of
        # an event-based variable, while air temperature (min, max or mean)
        # is a good example of a continuous variable.

        pc0 = len(np.where(self.Ym[i] == 0)[0]) / float(len(self.Ym[i]))
        if pc0 > 0.25:
            self.varTypes[i] = 'event-based'

    return
def encode(self, data_source, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param data_source: the source of the data that is to be encoded
                            as a spreadsheet. This may be:
                            resource: the resource
                            item: a list of pre-fetched values
                                  the headings are in the first row
                                  the data types are in the second row
        @param attr: dictionary of parameters:
             * title: The main title of the report
             * list_fields: Fields to include in list views
             * report_groupby: Used to create a grouping of the result:
                               either a Field object of the resource
                               or a string which matches a value in
                               the heading
             * use_colour: True to add colour to the cells. default False
    """

    # For interactive requests redirect with a session error message,
    # otherwise just log and return the error string
    try:
        import xlwt
    except ImportError:
        from ..s3rest import S3Request
        if current.auth.permission.format in S3Request.INTERACTIVE_FORMATS:
            current.session.error = self.ERROR.XLWT_ERROR
            redirect(URL(extension=""))
        else:
            error = self.ERROR.XLWT_ERROR
            current.log.error(error)
            return error
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        from ..s3rest import S3Request
        if current.auth.permission.format in S3Request.INTERACTIVE_FORMATS:
            current.session.error = self.ERROR.XLRD_ERROR
            redirect(URL(extension=""))
        else:
            error = self.ERROR.XLRD_ERROR
            current.log.error(error)
            return error

    import datetime

    request = current.request

    # The xlwt library supports a maximum of 182 characters in a single cell
    max_cell_size = 182

    COL_WIDTH_MULTIPLIER = S3XLS.COL_WIDTH_MULTIPLIER

    # Get the attributes
    title = attr.get("title")
    list_fields = attr.get("list_fields")
    if not list_fields:
        list_fields = data_source.list_fields()
    group = attr.get("dt_group")
    use_colour = attr.get("use_colour", False)

    # Extract the data from the data_source
    if isinstance(data_source, (list, tuple)):
        headers = data_source[0]
        types = data_source[1]
        rows = data_source[2:]
        lfields = list_fields
    else:
        (title, types, lfields, headers, rows) = \
            self.extractResource(data_source, list_fields)

    report_groupby = lfields[group] if group else None

    # Sanity-check: every data row must have as many items as headers
    if len(rows) > 0 and len(headers) != len(rows[0]):
        # FIX: was len(items[0]) - "items" is undefined in this variant
        # (the extracted list is named "rows"), so the warning itself
        # raised a NameError whenever it was triggered
        msg = """modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(headers), len(rows[0]),
                     headers, list_fields)
        current.log.error(msg)

    groupby_label = headers[report_groupby] if report_groupby else None

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = settings.get_L10n_date_format()
    date_format_str = str(date_format)
    date_format = S3XLS.dt_format_translate(date_format)
    time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
    datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())

    # Create the workbook
    book = xlwt.Workbook(encoding="utf-8")

    # Add a sheet
    # Can't have a / in the sheet_name, so replace any with a space
    sheet_name = str(title.replace("/", " "))
    # sheet_name cannot be over 31 chars
    if len(sheet_name) > 31:
        sheet_name = sheet_name[:31]
    sheets = []
    rowLimit = 65536 #.xls exports are limited to 65536 rows per sheet, we bypass this by creating multiple sheets
    sheetnum = len(rows) / rowLimit
    count = 1
    while len(sheets) <= sheetnum:
        sheets.append(book.add_sheet('%s-%s' % (sheet_name, count)))
        count += 1

    # Styles
    styleLargeHeader = xlwt.XFStyle()
    styleLargeHeader.font.bold = True
    styleLargeHeader.font.height = 400
    if use_colour:
        styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
        styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
        styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR

    styleNotes = xlwt.XFStyle()
    styleNotes.font.italic = True
    styleNotes.font.height = 160 # 160 Twips = 8 point
    styleNotes.num_format_str = datetime_format

    styleHeader = xlwt.XFStyle()
    styleHeader.font.bold = True
    styleHeader.num_format_str = datetime_format
    if use_colour:
        styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR

    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    if use_colour:
        styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR

    styleOdd = xlwt.XFStyle()
    if use_colour:
        styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
        styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]

    styleEven = xlwt.XFStyle()
    if use_colour:
        styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
        styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]

    for sheet in sheets:
        # Header row
        colCnt = 0
        # Move this down if a title row will be added
        if settings.get_xls_title_row():
            headerRow = sheet.row(2)
        else:
            headerRow = sheet.row(0)
        fieldWidths = []
        # FIX: renamed local "id" -> "has_id" (shadowed the builtin)
        has_id = False
        for selector in lfields:
            if selector == report_groupby:
                continue
            label = headers[selector]
            if label == "Id":
                # Indicate to adjust colCnt when writing out
                has_id = True
                fieldWidths.append(0)
                colCnt += 1
                continue
            if label == "Sort":
                continue
            if has_id:
                # Adjust for the skipped column
                writeCol = colCnt - 1
            else:
                writeCol = colCnt
            headerRow.write(writeCol, str(label), styleHeader)
            width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
            width = min(width, 65535) # USHRT_MAX
            fieldWidths.append(width)
            sheet.col(writeCol).width = width
            colCnt += 1

    # Title row (optional, deployment setting)
    if settings.get_xls_title_row():
        for sheet in sheets:
            # First row => Title (standard = "title_list" CRUD string)
            currentRow = sheet.row(0)
            if colCnt > 0:
                sheet.write_merge(0, 0, 0, colCnt, str(title),
                                  styleLargeHeader)
            currentRow.height = 500
            # Second row => Export date/time
            currentRow = sheet.row(1)
            currentRow.write(0, str(current.T("Date Exported:")), styleNotes)
            currentRow.write(1, request.now, styleNotes)
            # Fix the size of the last column to display the date
            if 16 * COL_WIDTH_MULTIPLIER > width:
                sheet.col(colCnt).width = 16 * COL_WIDTH_MULTIPLIER

    # Initialize counters
    totalCols = colCnt
    # Move the rows down if a title row is included
    if settings.get_xls_title_row():
        rowCnt = 2
    else:
        rowCnt = 0
    subheading = None
    for row in rows:
        # Item details
        rowCnt += 1
        # Map the running row count onto the correct sheet + local row
        sheetCnt = (rowCnt / rowLimit)
        if sheetCnt == 0:
            currentRow = sheets[sheetCnt].row(rowCnt - (sheetCnt * rowLimit))
        else:
            currentRow = sheets[sheetCnt].row(rowCnt - (sheetCnt * rowLimit) + 1)
        colCnt = 0
        # Alternate row style for "stripes"
        if rowCnt % 2 == 0:
            style = styleEven
        else:
            style = styleOdd
        if report_groupby:
            represent = s3_strip_markup(s3_unicode(row[report_groupby]))
            if subheading != represent:
                # Start of a new group - insert a merged subheading row
                subheading = represent
                sheets[sheetCnt].write_merge(rowCnt, rowCnt, 0, totalCols,
                                             subheading, styleSubHeader)
                rowCnt += 1
                currentRow = sheets[sheetCnt].row(rowCnt)
                if rowCnt % 2 == 0:
                    style = styleEven
                else:
                    style = styleOdd
        for field in lfields:
            label = headers[field]
            if label == groupby_label:
                continue
            if label == "Id":
                # Skip the ID column from XLS exports
                colCnt += 1
                continue
            represent = s3_strip_markup(s3_unicode(row[field]))
            coltype = types[colCnt]
            if coltype == "sort":
                continue
            if len(represent) > max_cell_size:
                represent = represent[:max_cell_size]
            value = represent
            # Convert typed columns to native XLS values with a matching
            # number format; on any parse failure keep the string as-is
            if coltype == "date":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            if has_id:
                # Adjust for the skipped column
                writeCol = colCnt - 1
            else:
                writeCol = colCnt
            currentRow.write(writeCol, value, style)
            # Widen the column if this cell needs more space
            width = len(represent) * COL_WIDTH_MULTIPLIER
            if width > fieldWidths[colCnt]:
                fieldWidths[colCnt] = width
                sheets[sheetCnt].col(writeCol).width = width
            colCnt += 1

    # Freeze the header row
    for sheet in sheets:
        sheet.panes_frozen = True
        sheet.horz_split_pos = 1

    output = StringIO()
    book.save(output)

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, str(title))
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    output.seek(0)
    return output.read()
def encode(self, data_source, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param data_source: the source of the data that is to be encoded
                            as a spreadsheet. This may be:
                            resource: the resource
                            item: a list of pre-fetched values
                                  the headings are in the first row
                                  the data types are in the second row
        @param attr: dictionary of parameters:
             * title: The main title of the report
             * list_fields: Fields to include in list views
             * report_groupby: Used to create a grouping of the result:
                               either a Field object of the resource
                               or a string which matches a value in
                               the heading
             * use_colour: True to add colour to the cells. default False

        @return: the spreadsheet contents as a str (the caller is expected
                 to stream these bytes back as the HTTP response body)
    """

    import datetime
    # Both xlwt (writer) and xlrd (date serial conversion) are required;
    # missing either redirects the user with an error message
    try:
        import xlwt
    except ImportError:
        current.session.error = self.ERROR.XLWT_ERROR
        redirect(URL(extension=""))
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        current.session.error = self.ERROR.XLRD_ERROR
        redirect(URL(extension=""))

    # Environment
    request = current.request

    # The xlwt library supports a maximum of 182 character in a single cell
    max_cell_size = 182

    # Get the attributes
    title = attr.get("title")
    list_fields = attr.get("list_fields")
    report_groupby = attr.get("report_groupby")
    use_colour = attr.get("use_colour", False)

    # Extract the data from the data_source
    # (pre-extracted list/tuple: headers row, types row, then data rows)
    if isinstance(data_source, (list, tuple)):
        headers = data_source[0]
        types = data_source[1]
        items = data_source[2:]
    else:
        (title, types, headers, items) = self.extractResource(data_source,
                                                              list_fields,
                                                              report_groupby)

    # Sanity check: header count must match the column count of the rows;
    # mismatches are logged but export continues regardless
    if len(items) > 0 and len(headers) != len(items[0]):
        from ..s3utils import s3_debug
        msg = """modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
requesting url %s
Headers = %d, Data Items = %d
Headers     %s
List Fields %s""" % (request.url, len(headers), len(items[0]), headers, list_fields)
        s3_debug(msg)

    # Grouping label: Field.label when a Field object was passed,
    # otherwise the string itself must match a heading value
    if report_groupby != None:
        if isinstance(report_groupby, Field):
            groupby_label = report_groupby.label
        else:
            groupby_label = report_groupby

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = S3XLS.dt_format_translate(settings.get_L10n_date_format())
    time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
    datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())

    # Initialize output
    output = StringIO()
    # Create the workbook and a sheet in it
    book = xlwt.Workbook(encoding="utf-8")
    # The spreadsheet doesn't like a / in the sheet name, so replace any with a space
    sheet1 = book.add_sheet(str(title.replace("/", " ")))

    # Styles
    styleLargeHeader = xlwt.XFStyle()
    styleLargeHeader.font.bold = True
    styleLargeHeader.font.height = 400
    if use_colour:
        styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
        styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
        styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR

    styleNotes = xlwt.XFStyle()
    styleNotes.font.italic = True
    styleNotes.font.height = 160 # 160 Twips = 8point
    styleNotes.num_format_str = datetime_format

    styleHeader = xlwt.XFStyle()
    styleHeader.font.bold = True
    styleHeader.num_format_str = datetime_format
    if use_colour:
        styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR

    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    if use_colour:
        styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR

    styleOdd = xlwt.XFStyle()
    if use_colour:
        styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
        styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]

    styleEven = xlwt.XFStyle()
    if use_colour:
        styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
        styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]

    # Header row (row 2; rows 0-1 are reserved for title and export date)
    # colCnt starts at -1 so the first written column is index 0
    colCnt = -1
    headerRow = sheet1.row(2)
    fieldWidth = []
    for label in headers:
        if label == "Sort":
            continue
        if report_groupby != None:
            if label == groupby_label:
                continue
        colCnt += 1
        headerRow.write(colCnt, str(label), styleHeader)
        width = len(label) * S3XLS.COL_WIDTH_MULTIPLIER
        fieldWidth.append(width)
        sheet1.col(colCnt).width = width

    # Title row
    currentRow = sheet1.row(0)
    if colCnt > 0:
        sheet1.write_merge(0, 0, 0, colCnt, str(title), styleLargeHeader)
    currentRow = sheet1.row(1)
    currentRow.height = 440
    currentRow.write(colCnt, request.now, styleNotes)
    # fix the size of the last column to display the date
    if 16 * S3XLS.COL_WIDTH_MULTIPLIER > width:
        sheet1.col(colCnt).width = 16 * S3XLS.COL_WIDTH_MULTIPLIER

    # Initialize counters
    totalCols = colCnt
    rowCnt = 3
    colCnt = 0
    subheading = None
    for item in items:
        # Item details
        rowCnt += 1
        currentRow = sheet1.row(rowCnt)
        colCnt = 0
        # Alternate row background (only visible when use_colour is True)
        if rowCnt % 2 == 0:
            style = styleEven
        else:
            style = styleOdd
        for represent in item:
            coltype = types[colCnt]
            # NOTE(review): this "sort" skip does not advance colCnt, so
            # types/headers indexing would shift for the remaining columns
            # of this row — confirm whether "sort" columns can occur here
            if coltype == "sort":
                continue
            label = headers[colCnt]
            if type(represent) is not str:
                represent = unicode(represent)
            if len(represent) > max_cell_size:
                represent = represent[:max_cell_size]
            # Strip away markup from representation
            try:
                markup = etree.XML(str(represent))
                text = markup.xpath(".//text()")
                if text:
                    text = " ".join(text)
                else:
                    text = ""
                represent = text
            except:
                pass
            # Grouping column: emit a merged sub-header row whenever the
            # group value changes, then skip writing the cell itself
            if report_groupby != None:
                if label == groupby_label:
                    if subheading != represent:
                        subheading = represent
                        sheet1.write_merge(rowCnt, rowCnt, 0, totalCols,
                                           represent, styleSubHeader)
                        rowCnt += 1
                        currentRow = sheet1.row(rowCnt)
                        if rowCnt % 2 == 0:
                            style = styleEven
                        else:
                            style = styleOdd
                    continue
            value = represent
            # Convert typed values to native XLS representations; any
            # parse failure silently falls back to the raw string
            if coltype == "date":
                try:
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    # NOTE(review): parses with the L10n *date* format, so
                    # time-of-day components may not be captured — confirm
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second,
                                  )
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    # NOTE(review): uses the *date* format for time values —
                    # likely always falls through to except — confirm
                    format = str(settings.get_L10n_date_format())
                    cell_datetime = datetime.datetime.strptime(value, format)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second,
                                  )
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            currentRow.write(colCnt, value, style)
            # Auto-grow the column width to fit the widest cell seen so far
            width = len(represent) * S3XLS.COL_WIDTH_MULTIPLIER
            if width > fieldWidth[colCnt]:
                fieldWidth[colCnt] = width
                sheet1.col(colCnt).width = width
            colCnt += 1

    # Freeze the title/header rows
    sheet1.panes_frozen = True
    sheet1.horz_split_pos = 3
    book.save(output)

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, str(title))
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition
    output.seek(0)
    return output.read()
def encode(self, data_source, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param data_source: the source of the data that is to be encoded
                            as a spreadsheet. This may be:
                            resource: the resource
                            item: a list of pre-fetched values
                                  the headings are in the first row
                                  the data types are in the second row
        @param attr: dictionary of parameters:
             * title: The main title of the report
             * list_fields: Fields to include in list views
             * report_groupby: Used to create a grouping of the result:
                               either a Field object of the resource
                               or a string which matches a value in
                               the heading
             * use_colour: True to add colour to the cells. default False

        @return: the spreadsheet contents as a str, or an error message
                 (str) when xlwt/xlrd are unavailable in a non-interactive
                 request
    """

    # Both xlwt (writer) and xlrd (date serial conversion) are required;
    # interactive requests get redirected with a session error, other
    # formats get the error message returned directly
    try:
        import xlwt
    except ImportError:
        from ..s3rest import S3Request
        if current.auth.permission.format in S3Request.INTERACTIVE_FORMATS:
            current.session.error = self.ERROR.XLWT_ERROR
            redirect(URL(extension=""))
        else:
            error = self.ERROR.XLWT_ERROR
            current.log.error(error)
            return error
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        from ..s3rest import S3Request
        if current.auth.permission.format in S3Request.INTERACTIVE_FORMATS:
            current.session.error = self.ERROR.XLRD_ERROR
            redirect(URL(extension=""))
        else:
            error = self.ERROR.XLRD_ERROR
            current.log.error(error)
            return error

    import datetime

    request = current.request

    # The xlwt library supports a maximum of 182 characters in a single cell
    max_cell_size = 182

    COL_WIDTH_MULTIPLIER = S3XLS.COL_WIDTH_MULTIPLIER

    # Get the attributes
    title = attr.get("title")
    list_fields = attr.get("list_fields")
    if not list_fields:
        list_fields = data_source.list_fields()
    group = attr.get("dt_group")
    use_colour = attr.get("use_colour", False)

    # Extract the data from the data_source
    if isinstance(data_source, (list, tuple)):
        headers = data_source[0]
        types = data_source[1]
        rows = data_source[2:]
        lfields = list_fields
    else:
        (title, types, lfields, headers, rows) = self.extractResource(data_source,
                                                                      list_fields)

    report_groupby = lfields[group] if group else None

    # Sanity check: header count must match the column count of the rows
    # (fixed: the diagnostic previously referenced the undefined name
    # "items", raising a NameError whenever the mismatch occurred)
    if len(rows) > 0 and len(headers) != len(rows[0]):
        msg = """modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
requesting url %s
Headers = %d, Data Items = %d
Headers     %s
List Fields %s""" % (request.url, len(headers), len(rows[0]), headers, list_fields)
        current.log.error(msg)

    groupby_label = headers[report_groupby] if report_groupby else None

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = settings.get_L10n_date_format()
    date_format_str = str(date_format)
    date_format = S3XLS.dt_format_translate(date_format)
    time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
    datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())

    # Create the workbook
    book = xlwt.Workbook(encoding="utf-8")

    # Add a sheet
    # Can't have a / in the sheet_name, so replace any with a space
    sheet_name = str(title.replace("/", " "))
    # sheet_name cannot be over 31 chars
    if len(sheet_name) > 31:
        sheet_name = sheet_name[:31]

    sheets = []
    rowLimit = 65536 #.xls exports are limited to 65536 rows per sheet, we bypass this by creating multiple sheets
    sheetnum = len(rows) / rowLimit
    count = 1
    while len(sheets) <= sheetnum:
        sheets.append(book.add_sheet('%s-%s' % (sheet_name, count)))
        count += 1

    # Styles
    styleLargeHeader = xlwt.XFStyle()
    styleLargeHeader.font.bold = True
    styleLargeHeader.font.height = 400
    if use_colour:
        styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
        styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
        styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR

    styleNotes = xlwt.XFStyle()
    styleNotes.font.italic = True
    styleNotes.font.height = 160 # 160 Twips = 8 point
    styleNotes.num_format_str = datetime_format

    styleHeader = xlwt.XFStyle()
    styleHeader.font.bold = True
    styleHeader.num_format_str = datetime_format
    if use_colour:
        styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR

    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    if use_colour:
        styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
        styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR

    styleOdd = xlwt.XFStyle()
    if use_colour:
        styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
        styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]

    styleEven = xlwt.XFStyle()
    if use_colour:
        styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
        styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]

    # Write the header row to every sheet and compute initial column widths
    for sheet in sheets:
        # Header row
        colCnt = 0
        # Move this down if a title row will be added
        if settings.get_xls_title_row():
            headerRow = sheet.row(2)
        else:
            headerRow = sheet.row(0)
        fieldWidths = []
        id = False
        for selector in lfields:
            if selector == report_groupby:
                continue
            label = headers[selector]
            if label == "Id":
                # Indicate to adjust colCnt when writing out
                id = True
                fieldWidths.append(0)
                colCnt += 1
                continue
            if label == "Sort":
                continue
            if id:
                # Adjust for the skipped column
                writeCol = colCnt - 1
            else:
                writeCol = colCnt
            headerRow.write(writeCol, str(label), styleHeader)
            width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
            width = min(width, 65535) # USHRT_MAX
            fieldWidths.append(width)
            sheet.col(writeCol).width = width
            colCnt += 1

    # Title row (optional, deployment setting)
    if settings.get_xls_title_row():
        for sheet in sheets:
            # First row => Title (standard = "title_list" CRUD string)
            currentRow = sheet.row(0)
            if colCnt > 0:
                sheet.write_merge(0, 0, 0, colCnt, str(title),
                                  styleLargeHeader)
            currentRow.height = 500
            # Second row => Export date/time
            currentRow = sheet.row(1)
            currentRow.write(0, str(current.T("Date Exported:")), styleNotes)
            currentRow.write(1, request.now, styleNotes)
            # Fix the size of the last column to display the date
            if 16 * COL_WIDTH_MULTIPLIER > width:
                sheet.col(colCnt).width = 16 * COL_WIDTH_MULTIPLIER

    # Initialize counters
    totalCols = colCnt
    # Move the rows down if a title row is included
    if settings.get_xls_title_row():
        rowCnt = 2
    else:
        rowCnt = 0
    subheading = None
    for row in rows:
        # Item details
        rowCnt += 1
        # Pick the sheet for this row (rows beyond rowLimit spill over)
        sheetCnt = (rowCnt / rowLimit)
        if sheetCnt == 0:
            currentRow = sheets[sheetCnt].row(rowCnt - (sheetCnt * rowLimit))
        else:
            currentRow = sheets[sheetCnt].row(rowCnt - (sheetCnt * rowLimit) + 1)
        colCnt = 0
        if rowCnt % 2 == 0:
            style = styleEven
        else:
            style = styleOdd
        # Emit a merged sub-header row whenever the group value changes
        if report_groupby:
            represent = s3_strip_markup(s3_unicode(row[report_groupby]))
            if subheading != represent:
                subheading = represent
                sheets[sheetCnt].write_merge(rowCnt, rowCnt, 0, totalCols,
                                             subheading, styleSubHeader)
                rowCnt += 1
                currentRow = sheets[sheetCnt].row(rowCnt)
                if rowCnt % 2 == 0:
                    style = styleEven
                else:
                    style = styleOdd
        for field in lfields:
            label = headers[field]
            if label == groupby_label:
                continue
            if label == "Id":
                # Skip the ID column from XLS exports
                colCnt += 1
                continue
            represent = s3_strip_markup(s3_unicode(row[field]))
            coltype = types[colCnt]
            if coltype == "sort":
                continue
            if len(represent) > max_cell_size:
                represent = represent[:max_cell_size]
            value = represent
            # Convert typed values to native XLS representations; any
            # parse failure silently falls back to the raw string
            if coltype == "date":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            if id:
                # Adjust for the skipped column
                writeCol = colCnt - 1
            else:
                writeCol = colCnt
            currentRow.write(writeCol, value, style)
            # Auto-grow the column width to fit the widest cell seen so far
            width = len(represent) * COL_WIDTH_MULTIPLIER
            if width > fieldWidths[colCnt]:
                fieldWidths[colCnt] = width
                sheets[sheetCnt].col(writeCol).width = width
            colCnt += 1

    # Freeze the header row on every sheet
    for sheet in sheets:
        sheet.panes_frozen = True
        sheet.horz_split_pos = 1

    output = StringIO()
    book.save(output)

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, str(title))
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition
    output.seek(0)
    return output.read()
def load_err_file(self):
    """Read .err file and return None if it fails."""
    table = self.open_err_file(self.fname)
    if table is None:
        return

    # Scan the header section for station metadata and stop at the first
    # line of the VARIABLE section; short lines are skipped.
    for hdr_row, fields in enumerate(table):
        try:
            if fields[0] == 'VARIABLE':
                break
            elif fields[0] == 'Station Name':
                self.staName = fields[1]
            elif fields[0] == 'Climate Identifier':
                self.climID = fields[1]
        except IndexError:
            continue
    hdr_row += 1

    # ------------------------------------------------ Re-Organizes Data --

    # Collect the unique weather variable names from the data section
    data = np.array(table[hdr_row:])
    self.varNames = np.unique(data[:, 0])
    self.varTypes = ['continuous'] * (len(self.varNames))

    # Split the records according to the weather variables found.
    self.Yp, self.Ym, self.Time, self.Date = [], [], [], []
    for k, name in enumerate(self.varNames):
        sel = np.where(data[:, 0] == name)[0]
        self.Yp.append(data[sel, 7].astype(float))
        self.Ym.append(data[sel, 8].astype(float))
        yy = data[sel, 1].astype(int)
        mm = data[sel, 2].astype(int)
        dd = data[sel, 3].astype(int)

        # ---- Time ----

        # Day-of-year offset: Excel serial of (y, m, d) minus the Excel
        # serial of January 1st of the same year
        t = np.zeros(len(yy))
        for j, (y, m, d) in enumerate(zip(yy, mm, dd)):
            t[j] = (xldate_from_date_tuple((y, m, d), 0) -
                    xldate_from_date_tuple((y, 1, 1), 0))
        self.Time.append(t)
        self.Date.append([yy, mm, dd])

        # ---- Weather Variable Type ----

        # If the proportion of zeros in the data series is higher
        # than 25%, the data type is set as an event-based weather
        # variable. Otherwise, default value is kept and variable is
        # considered to be continuous in time.
        #
        # The precipitation (solid, liquid or total) is a good example of
        # an event-based variable, while air temperature (min, max or mean)
        # is a good example of a continuous variable.
        measured = self.Ym[k]
        pc0 = np.count_nonzero(measured == 0) / float(len(measured))
        if pc0 > 0.25:
            self.varTypes[k] = 'event-based'

    return
def encode(self, resource, **attr):
    """
        Export data as a Microsoft Excel spreadsheet

        @param resource: the source of the data that is to be encoded
                         as a spreadsheet, can be either of:
                            1) an S3Resource
                            2) an array of value dicts (dict of
                               column labels as first item, list of
                               field types as second item)
                            3) a dict like:
                               {columns: [key, ...],
                                headers: {key: label},
                                types: {key: type},
                                rows: [{key:value}],
                                }

        @param attr: keyword arguments (see below)

        @keyword as_stream: return the buffer (BytesIO) rather than
                            its contents (str), useful when the output
                            is supposed to be stored locally
        @keyword title: the main title of the report
        @keyword list_fields: fields to include in list views
        @keyword report_groupby: used to create a grouping of the result:
                                 either a Field object of the resource
                                 or a string which matches a value in
                                 the heading
        @keyword use_colour: True to add colour to the cells, default False
        @keyword evenodd: render different background colours
                          for even/odd rows ("stripes")
    """

    # Do not redirect from here!
    # ...but raise proper status code, which can be caught by caller
    try:
        import xlwt
    except ImportError:
        error = self.ERROR.XLWT_ERROR
        current.log.error(error)
        raise HTTP(503, body=error)
    try:
        from xlrd.xldate import xldate_from_date_tuple, \
                                xldate_from_time_tuple, \
                                xldate_from_datetime_tuple
    except ImportError:
        error = self.ERROR.XLRD_ERROR
        current.log.error(error)
        raise HTTP(503, body=error)

    import datetime

    MAX_CELL_SIZE = self.MAX_CELL_SIZE
    COL_WIDTH_MULTIPLIER = self.COL_WIDTH_MULTIPLIER

    # Get the attributes
    title = attr.get("title")
    if title is None:
        title = current.T("Report")
    list_fields = attr.get("list_fields")
    group = attr.get("dt_group")
    use_colour = attr.get("use_colour", False)
    evenodd = attr.get("evenodd", True)

    # Extract the data from the resource
    # (three accepted input shapes, see docstring)
    if isinstance(resource, dict):
        headers = resource.get("headers", {})
        lfields = resource.get("columns", list_fields)
        column_types = resource.get("types")
        types = [column_types[col] for col in lfields]
        rows = resource.get("rows")
    elif isinstance(resource, (list, tuple)):
        headers = resource[0]
        types = resource[1]
        rows = resource[2:]
        lfields = list_fields
    else:
        if not list_fields:
            list_fields = resource.list_fields()
        (title, types, lfields, headers, rows) = self.extract(resource,
                                                              list_fields,
                                                              )

    # Verify columns in items
    request = current.request
    if len(rows) > 0 and len(lfields) > len(rows[0]):
        msg = """modules/s3/codecs/xls: There is an error in the list items, a field doesn't exist
requesting url %s
Headers = %d, Data Items = %d
Headers     %s
List Fields %s""" % (request.url, len(lfields), len(rows[0]), headers, lfields)
        current.log.error(msg)

    # Grouping
    report_groupby = lfields[group] if group else None
    groupby_label = headers[report_groupby] if report_groupby else None

    # Date/Time formats from L10N deployment settings
    settings = current.deployment_settings
    date_format = settings.get_L10n_date_format()
    date_format_str = str(date_format)

    dt_format_translate = self.dt_format_translate
    date_format = dt_format_translate(date_format)
    time_format = dt_format_translate(settings.get_L10n_time_format())
    datetime_format = dt_format_translate(settings.get_L10n_datetime_format())

    # title_row may be a boolean or a callable that renders custom title rows
    title_row = settings.get_xls_title_row()

    # Get styles
    styles = self._styles(use_colour = use_colour,
                          evenodd = evenodd,
                          datetime_format = datetime_format,
                          )

    # Create the workbook
    book = xlwt.Workbook(encoding="utf-8")

    # Add sheets
    sheets = []
    # XLS exports are limited to 65536 rows per sheet, we bypass
    # this by creating multiple sheets
    row_limit = 65536
    sheetnum = len(rows) / row_limit
    # Can't have a / in the sheet_name, so replace any with a space
    sheet_name = s3_str(title.replace("/", " "))
    if len(sheet_name) > 28:
        # Sheet name cannot be over 31 chars
        # (take sheet number suffix into account)
        sheet_name = sheet_name[:28]
    count = 1
    while len(sheets) <= sheetnum:
        sheets.append(book.add_sheet("%s-%s" % (sheet_name, count)))
        count += 1

    if callable(title_row):
        # Calling with sheet None to get the number of title rows
        title_row_length = title_row(None)
    else:
        title_row_length = 2

    # Add header row to all sheets, determine columns widths
    header_style = styles["header"]
    for sheet in sheets:
        # Move this down if a title row will be added
        if title_row:
            header_row = sheet.row(title_row_length)
        else:
            header_row = sheet.row(0)
        column_widths = []
        has_id = False
        col_index = 0
        for selector in lfields:
            if selector == report_groupby:
                continue
            label = headers[selector]
            if label == "Id":
                # Indicate to adjust col_index when writing out
                has_id = True
                column_widths.append(0)
                col_index += 1
                continue
            if label == "Sort":
                continue
            if has_id:
                # Adjust for the skipped column
                write_col_index = col_index - 1
            else:
                write_col_index = col_index
            header_row.write(write_col_index, str(label), header_style)
            width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
            width = min(width, 65535) # USHRT_MAX
            column_widths.append(width)
            sheet.col(write_col_index).width = width
            col_index += 1

    title = s3_str(title)

    # Title row (optional, deployment setting)
    if title_row:
        T = current.T
        large_header_style = styles["large_header"]
        notes_style = styles["notes"]
        for sheet in sheets:
            if callable(title_row):
                # Custom title rows
                title_row(sheet)
            else:
                # First row => Title (standard = "title_list" CRUD string)
                current_row = sheet.row(0)
                if col_index > 0:
                    sheet.write_merge(0, 0, 0, col_index, title,
                                      large_header_style,
                                      )
                current_row.height = 500
                # Second row => Export date/time
                current_row = sheet.row(1)
                current_row.write(0, "%s:" % T("Date Exported"), notes_style)
                current_row.write(1, request.now, notes_style)
                # Fix the size of the last column to display the date
                if 16 * COL_WIDTH_MULTIPLIER > width:
                    sheet.col(col_index).width = 16 * COL_WIDTH_MULTIPLIER

    # Initialize counters
    total_cols = col_index
    # Move the rows down if a title row is included
    if title_row:
        row_index = title_row_length
    else:
        row_index = 0

    # Helper function to get the current row
    # (maps a global row counter to the correct sheet + sheet-local row)
    def get_current_row(row_count, row_limit):

        sheet_count = int(row_count / row_limit)
        row_number = row_count - (sheet_count * row_limit)
        if sheet_count > 0:
            row_number += 1
        return sheets[sheet_count], sheets[sheet_count].row(row_number)

    # Write the table contents
    subheading = None
    odd_style = styles["odd"]
    even_style = styles["even"]
    subheader_style = styles["subheader"]
    for row in rows:
        # Current row
        row_index += 1
        current_sheet, current_row = get_current_row(row_index, row_limit)
        style = even_style if row_index % 2 == 0 else odd_style

        # Group headers
        if report_groupby:
            represent = s3_strip_markup(s3_unicode(row[report_groupby]))
            if subheading != represent:
                # Start of new group - write group header
                subheading = represent
                current_sheet.write_merge(row_index, row_index, 0, total_cols,
                                          subheading,
                                          subheader_style,
                                          )
                # Move on to next row
                row_index += 1
                current_sheet, current_row = get_current_row(row_index, row_limit)
                style = even_style if row_index % 2 == 0 else odd_style

        col_index = 0
        remaining_fields = lfields

        # Custom row style?
        row_style = None
        if "_style" in row:
            stylename = row["_style"]
            if stylename in styles:
                row_style = styles[stylename]

        # Group header/footer row?
        if "_group" in row:
            group_info = row["_group"]
            label = group_info.get("label")
            totals = group_info.get("totals")
            if label:
                label = s3_strip_markup(s3_unicode(label))
                style = row_style or subheader_style
                span = group_info.get("span")
                if span == 0:
                    # Label spans the full row width
                    current_sheet.write_merge(row_index,
                                              row_index,
                                              0,
                                              total_cols - 1,
                                              label,
                                              style,
                                              )
                    if totals:
                        # Write totals into the next row
                        row_index += 1
                        current_sheet, current_row = \
                            get_current_row(row_index, row_limit)
                else:
                    # Label spans the first <span> columns, totals (if any)
                    # are written into the remaining columns of this row
                    current_sheet.write_merge(row_index,
                                              row_index,
                                              0,
                                              span - 1,
                                              label,
                                              style,
                                              )
                    col_index = span
                    remaining_fields = lfields[span:]
            if not totals:
                continue

        for field in remaining_fields:
            label = headers[field]
            if label == groupby_label:
                continue
            if label == "Id":
                # Skip the ID column from XLS exports
                col_index += 1
                continue

            if field not in row:
                represent = ""
            else:
                represent = s3_strip_markup(s3_unicode(row[field]))

            coltype = types[col_index]
            if coltype == "sort":
                continue
            if len(represent) > MAX_CELL_SIZE:
                represent = represent[:MAX_CELL_SIZE]
            value = represent
            # Convert typed values to native XLS representations; any
            # parse failure silently falls back to the raw string.
            # NOTE(review): num_format_str is set on the *shared* odd/even
            # style objects and persists for subsequent cells written with
            # the same style — confirm this is intended
            if coltype == "date":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day)
                    value = xldate_from_date_tuple(date_tuple, 0)
                    style.num_format_str = date_format
                except:
                    pass
            elif coltype == "datetime":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.year,
                                  cell_datetime.month,
                                  cell_datetime.day,
                                  cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_datetime_tuple(date_tuple, 0)
                    style.num_format_str = datetime_format
                except:
                    pass
            elif coltype == "time":
                try:
                    cell_datetime = datetime.datetime.strptime(value,
                                                               date_format_str)
                    date_tuple = (cell_datetime.hour,
                                  cell_datetime.minute,
                                  cell_datetime.second)
                    value = xldate_from_time_tuple(date_tuple)
                    style.num_format_str = time_format
                except:
                    pass
            elif coltype == "integer":
                try:
                    value = int(value)
                    style.num_format_str = "0"
                except:
                    pass
            elif coltype == "double":
                try:
                    value = float(value)
                    style.num_format_str = "0.00"
                except:
                    pass
            if has_id:
                # Adjust for the skipped column
                write_col_index = col_index - 1
            else:
                write_col_index = col_index
            current_row.write(write_col_index, value, style)
            # Auto-grow the column width to fit the widest cell seen so far
            width = len(represent) * COL_WIDTH_MULTIPLIER
            if width > column_widths[col_index]:
                column_widths[col_index] = width
                current_sheet.col(write_col_index).width = width
            col_index += 1

    # Additional sheet settings
    for sheet in sheets:
        sheet.panes_frozen = True
        sheet.horz_split_pos = 1

    # Write output
    output = BytesIO()
    book.save(output)
    output.seek(0)

    if attr.get("as_stream", False):
        return output

    # Response headers
    filename = "%s_%s.xls" % (request.env.server_name, title)
    disposition = "attachment; filename=\"%s\"" % filename
    response = current.response
    response.headers["Content-Type"] = contenttype(".xls")
    response.headers["Content-disposition"] = disposition

    return output.read()
def plot_synth_hydrograph(WL, TIME, WLogger, TIMELogger):
    """Plot simulated vs. observed groundwater hydrographs for 1970-2014.

    Parameters
    ----------
    WL : array-like
        Simulated water levels, received in mm and converted to metres
        below ground surface (mbgs) at the top of this function.
    TIME : array-like
        Time of the simulated series, in Excel serial-date numbers (days,
        datemode 0 -- see the xldate_from_date_tuple calls below).
    WLogger, TIMELogger : array-like
        NOTE(review): both parameters are immediately overwritten from the
        module-level ``waterlvlObj`` below, so the values passed by the
        caller are never used -- confirm whether that override is intended.

    Side effects: creates a new matplotlib figure with two overlaid axes
    (ax0 carries the yearly grid/tick labels, ax1 carries the data in
    Excel serial-date coordinates) and draws into it.  Nothing is
    returned in the code visible here.

    NOTE(review): this is Python 2 code -- the ``01`` literals below are
    octal-style integers and a SyntaxError under Python 3.
    """
    # ==========================================================================

    # Simulated levels arrive in mm; convert to metres below ground surface.
    WL = np.abs(WL) / 1000.
    YEAR = np.arange(1970, 2015).astype('int')

    # Figure geometry: margins given in inches, converted to
    # figure-fraction coordinates for add_axes().
    fig = plt.figure(figsize=(15, 7))
    fig.patch.set_facecolor('white')

    fheight = fig.get_figheight()
    fwidth = fig.get_figwidth()

    left_margin = 1
    right_margin = 0.35
    bottom_margin = 0.75
    top_margin = 0.25

    x0 = left_margin / fwidth
    y0 = bottom_margin / fheight
    w0 = 1 - (left_margin + right_margin) / fwidth
    h0 = 1 - (bottom_margin + top_margin) / fheight

    # NOTE(review): the WLogger/TIMELogger arguments are discarded here
    # and replaced with data read from the global ``waterlvlObj``.
    WLogger = waterlvlObj.lvl      # Observed groundwater level (mbgs)
    TIMELogger = waterlvlObj.time  # Time (days)

#    WLintrp = np.interp(TIMELogger, TIME, WL)
#    dWL = np.mean(WLintrp) - np.mean(WLogger)
#    WL -= dWL

    # ------------------------------------------------- AXES CREATION ----

    # ax0: background axis in (year, 0..1) coordinates, used only for the
    # yearly grid and tick labels.  ax1: transparent overlay in Excel
    # serial-date coordinates where the data are actually plotted.
    ax0 = fig.add_axes([x0, y0, w0, h0])
    ax0.patch.set_visible(True)

    ax1 = fig.add_axes(ax0.get_position(), frameon=False, zorder=1)
    ax1.patch.set_visible(False)

    # ---------------------------------------------------- AXIS RANGE ----

    Ymin0 = 0
    Ymax0 = 1
    Xmin0 = YEAR[0]-1
    Xmax0 = YEAR[-1]

    ax0.axis([Xmin0, Xmax0, Ymin0, Ymax0])

    Ymin1 = 6
    Ymax1 = 8.2
    # Excel serial dates for Jan 1st of the first year and of the year
    # after the last one (datemode 0).  NOTE(review): ``01`` == 1 here
    # (Python 2 octal-style literal).
    Xmin1 = xldate_from_date_tuple((YEAR[0], 01, 01), 0)
    Xmax1 = xldate_from_date_tuple((YEAR[-1]+1, 01, 01), 0)

    ax1.axis([Xmin1, Xmax1, Ymin1, Ymax1])

    # ----------------------------------------------- XTICKS FORMATTING ----

    # Major ticks mark every year (grid only, no labels); the labels are
    # drawn on minor ticks placed every 2nd year, rotated vertically.
    ax0.xaxis.set_ticks_position('bottom')
    ax0.tick_params(axis='x',direction='out', gridOn=True)
    ax0.set_xticks(YEAR)
    ax0.xaxis.set_ticklabels([])

    # -0.4 offset nudges the labels toward the centre of the year span.
    ax0.set_xticks(YEAR[::2]-0.4, minor=True)
    ax0.tick_params(axis='x', which='minor', length=0, gridOn=False, pad=5)
    ax0.xaxis.set_ticklabels(YEAR[::2], minor=True, rotation=90,
                             horizontalalignment='center', fontsize=12)

    # The data axis contributes no x-ticks of its own.
    ax1.tick_params(axis='x', length=0, gridOn=False)
    ax1.xaxis.set_ticklabels([])

    # ----------------------------------------------- YTICKS FORMATTING ----

    ax0.tick_params(axis='y', length=0, gridOn=False)
    ax0.yaxis.set_ticklabels([])

    ax1.yaxis.set_ticks_position('left')
    ax1.set_yticks(np.arange(Ymin1, Ymax1, 0.2))
    ax1.tick_params(axis='y',direction='out', gridOn=True)
    # Depth axis: larger depths (mbgs) are plotted downwards.
    ax1.invert_yaxis()

#    ax1.set_yticks(np.arange(0, 700, 50), minor=True)
#    ax0.tick_params(axis='y',direction='out', which='minor', gridOn=True)

    # -------------------------------------------------------- LABELS ----

    ax1.set_ylabel('Water Level (mbgs)', fontsize=14,
                   verticalalignment='bottom')
    ax1.yaxis.set_label_coords(-0.04, 0.5)

#    ax0.set_xlabel(LabelDB.years, fontsize=label_font_size,
#                   verticalalignment='top')
#    ax0.xaxis.set_label_coords(0.5, -0.075)

    # ------------------------------------------------------ PLOTTING ----

    ax1.plot(TIME, WL, color='blue', linestyle='-',
             label='Simulated water levels')
    ax1.plot(TIMELogger, WLogger, color='red', linestyle='-',
             label="Automatic water level measurements (Solinst Levelogger)")

    # Dashed horizontal line at 6.8 mbgs separating the two annotated
    # soil layers; text is anchored 250 days before 1994-01-01.
    ax1.plot([TIME[0], TIME[-1]], [6.8, 6.8], color='black', linestyle='--')
    Xtext = xldate_from_date_tuple((1994, 01, 01), 0)-250
    ax1.text(Xtext, 6.75, 'Sandy Loam (Sy = 0.18)', fontsize=14)
    ax1.text(Xtext, 6.95, 'Loamy Sand (Sy = 0.29)', fontsize=14)

#    print WL

#             marker='None', label='Trend Line ETP', clip_on=False,
#             zorder=100)

    # Dates of the manual measurements (Excel serial-date numbers).
    TIMEobs = np.array([35034, 35400, 35674, 40878, 41214, 41609, 41876])
    # Index of the first manual-measurement date in the simulated series.
    indx = np.where(TIMEobs[0] == TIME)[0][0]

    # Per-well manual measurements converted from elevations to depths;
    # each series is then shifted (dWL) so its first value matches the
    # simulated level at TIMEobs[0].
    # NOTE(review): P1A uses a 505 datum while the other wells use 507 --
    # the dWL shift cancels the offset, but confirm it is not a typo.
    WL_P1A = 505 - np.array([504.95, np.nan, np.nan, 505.51, 505.82,
                             505.96, 506.107])
    dWL = WL_P1A[0] - WL[indx]
    WL_P1A -= dWL

    WL_P1B = 507 - np.array([504.95, 505.08, 505.02, 505.69, 505.99,
                             506.14, 506.278])
    dWL = WL_P1B[0] - WL[indx]
    WL_P1B -= dWL

    WL_P2A = 507 - np.array([504.93, 505.06, np.nan, 505.66, 505.98,
                             506.14, 506.265])
    dWL = WL_P2A[0] - WL[indx]
    WL_P2A -= dWL

    WL_P2B = 507 - np.array([504.93, 505.06, 505, 505.65, 505.97,
                             506.13, 506.272])
    dWL = WL_P2B[0] - WL[indx]
    WL_P2B -= dWL

    WL_P3A = 507 - np.array([504.91, 505.05, np.nan, 505.65, np.nan,
                             506.11, 506.255])
    dWL = WL_P3A[0] - WL[indx]
    WL_P3A -= dWL

    WL_P4B = 507 - np.array([504.93, 505.04, 504.99, 505.6, 505.94,
                             506.12, 506.277])
    dWL = WL_P4B[0] - WL[indx]
    WL_P4B -= dWL

#    WL_P19 = np.array([np.nan, np.nan, np.nan, 6.8, 6.47, 6.29, 6.15])
    # Well P19 depths (mbgs).  The first three values are offsets from the
    # 6.8 m reference -- plotted below as "extrapolated" points.
    WL_P19 = np.array([6.8+0.73, 6.8+0.59, 6.8+0.64, 6.8, 6.47, 6.29, 6.15])
#    indx = np.where(TIMEobs[3] == TIME)[0][0]
#    dWL = WL_P19[3] - WL[indx]
#    WL_P19 -= dWL

    # Raw manual measurements for all wells (one row per well, one column
    # per date in TIMEobs); only used by the commented-out averaging code
    # below.  TODO confirm the row-to-well mapping against field notes.
    WLobs = np.array([[504.95, 505.98, np.nan, 505.51, 505.82, 505.96, 506.107],
                      [504.95, 505.08, 505.02, 505.69, 505.99, 506.14, 506.278],
                      [504.93, 505.06, np.nan, 505.66, 505.98, 506.14, 506.265],
                      [504.93, 505.06, 505.00, 505.65, 505.97, 506.13, 506.272],
                      [504.91, 505.05, np.nan, 505.65, np.nan, 506.11, 506.255],
                      [504.93, 505.04, 504.99, 505.60, 505.94, 506.12, 506.277],
                      [np.nan, np.nan, np.nan, 505.77, 506.10, 506.28, 506.42]])

#    WLobs_mean = np.zeros(7)
#    for i in range(7):
#        indx = np.where(~np.isnan(WLobs[:, i]))
#        WLobs_mean[i] = np.mean(WLobs[indx, i])
#
#    WLobs_mean = 510 - WLobs_mean
#    indx = np.where(TIMEobs[0] == TIME)[0][0]
#    dWL = WLobs_mean[0] - WL[indx]
#    WLobs_mean -= dWL

    # Shared marker settings for the manual-measurement scatter points.
    marker_size = 8
    marker_style = 'o'
    alpha_val = 1

#    ax1.plot(TIMEobs, WLobs_mean,
#             markerfacecolor='red', markeredgecolor='red', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)

#    ax1.plot(TIMEobs, WL_P1A,
#             markerfacecolor='red', markeredgecolor='red', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)
#
#    ax1.plot(TIMEobs, WL_P1B,
#             markerfacecolor='green', markeredgecolor='green', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)
#
#    ax1.plot(TIMEobs, WL_P2A,
#             markerfacecolor='blue', markeredgecolor='blue', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)
#
#    ax1.plot(TIMEobs, WL_P2B,
#             markerfacecolor='orange', markeredgecolor='orange', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)
#
#    ax1.plot(TIMEobs, WL_P3A,
#             markerfacecolor='magenta', markeredgecolor='magenta', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)

#    ax1.plot(TIMEobs, WL_P3A,
#             markerfacecolor='cyan', markeredgecolor='cyan', marker=marker_style,
#             markersize=marker_size, linestyle='None', label='ETP',
#             clip_on=False, zorder=100, alpha = alpha_val)

    # Direct manual measurements at P19 (last four dates) ...
    ax1.plot(TIMEobs[3:], WL_P19[3:],
             markerfacecolor='black', markeredgecolor='black',
             marker=marker_style, markersize=marker_size, linestyle='None',
             label='Manual water level measurements in well P19',
             clip_on=False, zorder=90, alpha = alpha_val)

    # ... and the first three dates, extrapolated from neighbouring wells,
    # drawn with a distinct diamond marker.
    ax1.plot(TIMEobs[:3], WL_P19[:3],
             markerfacecolor='black', markeredgecolor='black', marker='D',
             markersize=6, linestyle='None',
             label='Extrapolated water levels at P19 from manual measurements in neighboring wells',
             clip_on=False, zorder=90, alpha = alpha_val)

#    #----- RUNOF -----
#
#    ax0.plot(YEAR-0.5, YEARLY_RUNOFF,
#             color='red', markeredgecolor='None', marker='s',
#             markersize=5, linestyle=lspoint, label='Runoff',
#             clip_on=False, zorder=100)
#
#    A = np.polyfit(YEAR-0.5, YEARLY_RUNOFF, 1)
#    print 'Trend Runoff =', A[0], ' mm/y'
#    TREND1 = A[0]*(YEAR-0.5) + A[1]
#    ax0.plot(YEAR-0.5, TREND1, color='red', linestyle=lstrend,
#             marker='None', label='Trend Line Runoff', clip_on=False,
#             zorder=100)
#
#    #----- ETP -----
#
#    ax0.plot(YEAR-0.5, YEARLY_ET,
#             color='green', markeredgecolor='None', marker='D',
#             markersize=5, linestyle=lspoint, label='ETP',
#             clip_on=False, zorder=100)
#
#    A = np.polyfit(YEAR-0.5, YEARLY_ET, 1)
#    print 'Trend ETP =', A[0], ' mm/y'
#    TREND1 = A[0]*(YEAR-0.5) + A[1]
#    ax0.plot(YEAR-0.5, TREND1, color='green', linestyle=lstrend,
#             marker='None', label='Trend Line ETP', clip_on=False,
#             zorder=100)

    # -------------------------------------------------------- LEGEND ----

    ax1.legend(loc=2, ncol=1, numpoints=1, fontsize=12)