def generate_chart(file_path):
    """Render the configured time series as a PNG chart at file_path.

    Closure: reads specs, width, height, DSL, self.R / self.robjects,
    month_number_to_year_month, units_in_out, round_to_4_sd, isnan,
    start_month_0_indexed, MeaninglessUnitsException, current and os
    from the enclosing scope.

    For each (label, spec) pair in specs it parses the spec's DSL query
    expression, fetches grouped values from R (monthly, or yearly when the
    expression contains "Months("), fits a linear regression over the
    unit-converted values, and builds an R "ts" object. It then plots all
    series with a wrapped legend, appends the regression statistics to each
    legend label, overlays the regression lines, and finally stamps a
    watermark logo onto the saved image.
    """
    time_serieses = []
    from scipy import stats
    regression_lines = []
    R = self.R
    c = R("c")
    spec_names = []
    starts = []
    ends = []
    yearly = []
    for label, spec in specs:
        query_expression = spec["query_expression"]
        expression = DSL.parse(query_expression)
        understood_expression_string = str(expression)
        spec_names.append(label)
        units = DSL.units(expression)
        unit_string = str(units)
        if units is None:
            # The expression is dimensionally inconsistent: collect the
            # DSL's analysis output and raise it as the error message.
            analysis_strings = []
            def analysis_out(*things):
                analysis_strings.append("".join(map(str, things)))
            DSL.analysis(expression, analysis_out)
            raise MeaninglessUnitsException(
                "\n".join(analysis_strings))
        is_yearly_values = "Months(" in query_expression
        yearly.append(is_yearly_values)
        if is_yearly_values:
            if "Prev" in query_expression:
                # PreviousDecember handling:
                grouping_key = "(time_period - ((time_period + 1000008 + %i +1) %% 12))" % start_month_0_indexed
            else:
                grouping_key = "(time_period - ((time_period + 1000008 + %i) %% 12))" % start_month_0_indexed
        else:
            grouping_key = "time_period"
        code = DSL.R_Code_for_values(
            expression,
            grouping_key,
            "place_id IN (%s)" % ",".join(map(str, spec["place_ids"])))
        values_by_time_period_data_frame = R(code)()
        data = {}
        if isinstance(values_by_time_period_data_frame,
                      self.robjects.vectors.StrVector):
            # R returned an error string instead of a data frame.
            raise Exception(str(values_by_time_period_data_frame))
        elif values_by_time_period_data_frame.ncol == 0:
            # No data for this spec: contribute no series.
            pass
        else:
            keys = values_by_time_period_data_frame.rx2("key")
            values = values_by_time_period_data_frame.rx2("value")
            try:
                display_units = {
                    "Kelvin": "Celsius",
                }[unit_string]
            except KeyError:
                converter = lambda x: x
                display_units = unit_string
            else:
                converter = units_in_out[display_units]["out"]
            # NOTE(review): unused local — candidate for removal.
            linear_regression = R("{}")
            # Yearly "Prev*" series are keyed on the previous December;
            # shift by one month so labels land in the intended year.
            previous_december_month_offset = [
                0, 1][is_yearly_values and "Prev" in query_expression]
            def month_number_to_float_year(month_number):
                year, month = month_number_to_year_month(
                    month_number + previous_december_month_offset)
                return year + (float(month - 1) / 12)
            converted_keys = map(month_number_to_float_year, keys)
            converted_values = map(converter, values)
            regression_lines.append(
                stats.linregress(converted_keys, converted_values))
            add = data.__setitem__
            for key, value in zip(keys, values):
                add(key, value)
            # assume monthly values and monthly time_period
            start_month_number = min(data.iterkeys())
            starts.append(start_month_number)
            start_year, start_month = month_number_to_year_month(
                start_month_number + previous_december_month_offset)
            end_month_number = max(data.iterkeys())
            ends.append(end_month_number)
            end_year, end_month = month_number_to_year_month(
                end_month_number + previous_december_month_offset)
            # Fill gaps with None so the R ts is evenly spaced.
            values = []
            for month_number in range(start_month_number,
                                      end_month_number + 1,
                                      [1, 12][is_yearly_values]):
                if not data.has_key(month_number):
                    values.append(None)
                else:
                    values.append(converter(data[month_number]))
            if is_yearly_values:
                time_serieses.append(
                    R("ts")(self.robjects.FloatVector(values),
                            start=c(start_year),
                            end=c(end_year),
                            frequency=1))
            else:
                time_serieses.append(
                    R("ts")(self.robjects.FloatVector(values),
                            start=c(start_year, start_month),
                            end=c(end_year, end_month),
                            frequency=12))
    min_start = min(starts)
    max_end = max(ends)
    show_months = any(not is_yearly for is_yearly in yearly)
    if show_months:
        # label_step spaces out the x-axis marks sensibly based on
        # width by not marking all of them.
        ticks = (max_end - min_start) + 1
        # ticks should be made at 1,2,3,4,6,12 month intervals
        # or 1, 2, 5, 10, 20, 50 year intervals
        # depending on the usable width and the number of ticks
        # ticks should be at least 15 pixels apart
        usable_width = width - 100
        max_ticks = usable_width / 15.0
        Y = 12
        for step in [1, 2, 3, 4, 6, 12,
                     2 * Y, 5 * Y, 10 * Y, 20 * Y, 50 * Y]:
            if ticks / step <= max_ticks:
                break
        axis_points = []
        axis_labels = []
        month_names = ("Jan Feb Mar Apr May Jun "
                       "Jul Aug Sep Oct Nov Dec").split(" ")
        for month_number in range(min_start, max_end + 1, step):
            year, month = month_number_to_year_month(month_number)
            month -= 1
            axis_points.append(year + (month / 12.0))
            axis_labels.append("%s %i" % (month_names[month], year))
    else:
        # show only years
        axis_points = []
        axis_labels = []
        start_year, start_month = month_number_to_year_month(min_start)
        end_year, end_month = month_number_to_year_month(max_end)
        for year in range(start_year, end_year + 1):
            axis_points.append(year)
            axis_labels.append(year)
    # UTF-8 bytes for the degree sign (Python 2 byte string).
    display_units = display_units.replace("Celsius", "\xc2\xb0Celsius")
    R.png(filename=file_path, width=width, height=height)
    # R plotting function: wraps long legend labels, reserves margin
    # space for them, plots the series, then draws axis and legend.
    plot_chart = R("""
function (
    xlab, ylab, n, names,
    axis_points,
    axis_labels,
    axis_orientation,
    plot_type,
    width, height,
    total_margin_height,
    line_interspacing,
    ...
) {
    split_names <- lapply(
        names,
        strwrap,
        width=(width - 100)/5
    )
    wrapped_names <- lapply(
        split_names,
        paste,
        collapse='\n'
    )
    legend_line_count = sum(sapply(split_names, length))
    legend_height_inches <- grconvertY(
        -(
            (legend_line_count * 11) +
            (length(wrapped_names) * 6) + 30
        ),
        "device",
        "inches"
    ) - grconvertY(0, "device", "inches")
    par(
        xpd = T,
        mai = (par()$mai + c(legend_height_inches , 0, 0, 0))
    )
    ts.plot(...,
        gpars = list(
            xlab = xlab,
            ylab = ylab,
            col = c(1:n),
            pch = c(21:25),
            type = plot_type,
            xaxt = 'n'
        )
    )
    axis(
        1,
        at = axis_points,
        labels = axis_labels,
        las = axis_orientation
    )
    legend(
        par()$usr[1],
        par()$usr[3] - (
            grconvertY(0, "device", "user") -
            grconvertY(70, "device", "user")
        ),
        wrapped_names,
        cex = 0.8,
        pt.bg = c(1:n),
        pch = c(21:25),
        bty = 'n',
        y.intersp = line_interspacing,
        text.width = 3
    )
}""")
    # Append regression statistics to each legend label.
    for regression_line, i in zip(regression_lines,
                                  range(len(time_serieses))):
        slope, intercept, r, p, stderr = regression_line
        if isnan(slope) or isnan(intercept):
            spec_names[i] += " {cannot calculate linear regression}"
        else:
            if isnan(p):
                p_str = "NaN"
            else:
                p_str = str(round_to_4_sd(p))
            if isnan(stderr):
                stderr_str = "NaN"
            else:
                # Fixed: previously rounded p here (copy-paste bug), so
                # the reported S.E. was actually the p-value.
                stderr_str = str(round_to_4_sd(stderr))
            slope_str, intercept_str, r_str = map(
                str, map(round_to_4_sd, (slope, intercept, r)))
            spec_names[i] += (
                u" {"
                "y=%(slope_str)s x year %(add)s%(intercept_str)s, "
                "r= %(r_str)s, "
                "p= %(p_str)s, "
                "S.E.= %(stderr_str)s"
                "}") % dict(locals(),
                            add=[u"+ ",
                                 u""][intercept_str.startswith("-")])
    plot_chart(
        xlab="",
        ylab=display_units,
        n=len(time_serieses),
        names=spec_names,
        axis_points=axis_points,
        axis_labels=axis_labels,
        axis_orientation=[0, 2][show_months],
        plot_type="lo"[is_yearly_values],
        width=width,
        height=height,
        # R uses Normalised Display coordinates.
        # these have been found by recursive improvement
        # they place the legend legibly. tested up to 8 lines
        total_margin_height=150,
        line_interspacing=1.8,
        *time_serieses)
    # Overlay the fitted regression line for each series.
    for regression_line, colour_number in zip(
            regression_lines, range(len(time_serieses))):
        slope = regression_line[0]
        intercept = regression_line[1]
        if isnan(slope) or isnan(intercept):
            pass
        else:
            R.par(xpd=False)
            R.abline(intercept, slope, col=colour_number + 1)
    R("dev.off()")

    import Image, ImageEnhance
    RGBA = "RGBA"

    def reduce_opacity(image, opacity):
        """Returns an image with reduced opacity."""
        assert opacity >= 0 and opacity <= 1
        if image.mode != RGBA:
            image = image.convert(RGBA)
        else:
            image = image.copy()
        alpha = image.split()[3]
        alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
        image.putalpha(alpha)
        return image

    def scale_preserving_aspect_ratio(image, ratio):
        # Scale both dimensions by the same ratio.
        return image.resize(map(int, map(ratio.__mul__, image.size)))

    def watermark(image, mark, position, opacity=1):
        """Adds a watermark to an image."""
        if opacity < 1:
            mark = reduce_opacity(mark, opacity)
        if image.mode != RGBA:
            image = image.convert(RGBA)
        # create a transparent layer the size of the
        # image and draw the watermark in that layer.
        layer = Image.new(RGBA, image.size, (0, 0, 0, 0))
        if position == 'tile':
            for y in range(0, image.size[1], mark.size[1]):
                for x in range(0, image.size[0], mark.size[0]):
                    layer.paste(mark, (x, y))
        elif position == 'scale':
            # scale, but preserve the aspect ratio
            ratio = min(
                float(image.size[0]) / mark.size[0],
                float(image.size[1]) / mark.size[1])
            w = int(mark.size[0] * ratio)
            h = int(mark.size[1] * ratio)
            mark = mark.resize((w, h))
            layer.paste(mark, ((image.size[0] - w) / 2,
                               (image.size[1] - h) / 2))
        else:
            layer.paste(mark, position)
        # composite the watermark with the layer
        return Image.composite(layer, image, layer)

    image = Image.open(file_path)
    watermark_image_path = os.path.join(os.path.realpath("."),
                                        "applications",
                                        current.request.application,
                                        "static", "img",
                                        "Nepal-Government-Logo.png")
    watermark_image = Image.open(watermark_image_path)
    #watermark_image = scale_preserving_aspect_ratio(watermark_image, 0.5)
    watermark(image, watermark_image, 'scale', 0.05).save(file_path)
def get_overlay_data(self, query_expression):
    """Return (via the file cache) a JSON map-overlay file for query_expression.

    Parses and validates the DSL expression, then delegates to
    get_cached_or_generated_file keyed on the md5 of the canonical
    (understood) expression string, so identical queries reuse the file.

    Raises MeaninglessUnitsException when the expression's units do not
    resolve (the DSL analysis output becomes the message).
    """
    env = self.env
    DSL = env.DSL
    expression = DSL.parse(query_expression)
    understood_expression_string = str(expression)
    units = DSL.units(expression)
    if units is None:
        analysis_strings = []
        def analysis_out(*things):
            analysis_strings.append("".join(map(str, things)))
        DSL.analysis(expression, analysis_out)
        raise MeaninglessUnitsException("\n".join(analysis_strings))

    def generate_map_overlay_data(file_path):
        # Called only on cache miss: writes the overlay JSON to file_path.
        R = self.R
        code = DSL.R_Code_for_values(expression, "place_id")
        values_by_place_data_frame = R(code)()
        # R willfully removes empty data frame columns
        # which is ridiculous behaviour
        if isinstance(values_by_place_data_frame,
                      self.robjects.vectors.StrVector):
            raise Exception(str(values_by_place_data_frame))
        elif values_by_place_data_frame.ncol == 0:
            keys = []
            values = []
        else:
            keys = values_by_place_data_frame.rx2("key")
            values = values_by_place_data_frame.rx2("value")
        # Fixed cleanup: previously the finally clause called
        # overlay_data_file.close() unconditionally, raising
        # AttributeError on None when open() itself failed, and the
        # except clause unlinked a file that might not exist (and the
        # file was double-closed on error). Now each path closes once
        # and only unlinks a file that was actually created.
        overlay_data_file = None
        try:
            overlay_data_file = open(file_path, "w")
            write = overlay_data_file.write
            write('{')
            # sent back for acknowledgement:
            write('"understood_expression":"%s",'.__mod__(
                understood_expression_string.replace('"', '\\"')))
            write('"units":"%s",' % units)
            write('"grid_size":%f,' % min(grid_sizes(expression)))
            write('"keys":[')
            write(",".join(map(str, keys)))
            write('],')
            write('"values":[')
            write(",".join(
                map(lambda value: str(round_to_4_sd(value)), values)))
            write(']')
            write('}')
        except:
            # Remove the partial file so a broken cache entry is not served.
            if overlay_data_file is not None:
                overlay_data_file.close()
                os.unlink(file_path)
            raise
        else:
            overlay_data_file.close()

    return get_cached_or_generated_file(
        hashlib.md5(understood_expression_string).hexdigest() + ".json",
        generate_map_overlay_data)
def get_csv_location_data(self, query_expression):
    """Return (via the file cache) a CSV of per-station values for query_expression.

    Each row holds latitude, longitude, station id, station name,
    elevation and the expression's value for that place. Delegates to
    get_cached_or_generated_file keyed on the md5 of the canonical
    (understood) expression string.

    Raises MeaninglessUnitsException when the expression's units do not
    resolve (the DSL analysis output becomes the message).
    """
    env = self.env
    DSL = env.DSL
    expression = DSL.parse(query_expression)
    understood_expression_string = str(expression)
    units = DSL.units(expression)
    if units is None:
        analysis_strings = []
        def analysis_out(*things):
            analysis_strings.append("".join(map(str, things)))
        DSL.analysis(expression, analysis_out)
        raise MeaninglessUnitsException("\n".join(analysis_strings))

    def generate_map_csv_data(file_path):
        # Called only on cache miss: writes the CSV to file_path.
        R = self.R
        code = DSL.R_Code_for_values(expression, "place_id")
        values_by_place_data_frame = R(code)()
        # R willfully removes empty data frame columns
        # which is ridiculous behaviour
        if isinstance(values_by_place_data_frame,
                      self.robjects.vectors.StrVector):
            raise Exception(str(values_by_place_data_frame))
        elif values_by_place_data_frame.ncol == 0:
            keys = []
            values = []
        else:
            keys = values_by_place_data_frame.rx2("key")
            values = values_by_place_data_frame.rx2("value")
        db = current.db
        # Fixed cleanup: previously, if open() failed, both the except
        # and finally clauses referenced csv_data_file before assignment
        # (NameError masking the real exception), and on other errors
        # the file was closed twice. Now each path closes once and only
        # unlinks a file that was actually created.
        csv_data_file = None
        try:
            csv_data_file = open(file_path, "w")
            write = csv_data_file.write
            write(
                "latitude,longitude,station_id,station_name,elevation,%s\n"
                % (units))
            place_ids = {}
            table = db.climate_place
            etable = db.climate_place_elevation
            itable = db.climate_place_station_id
            ntable = db.climate_place_station_name
            for place_row in db(
                # only show Nepal
                (table.longitude > 79.5) &
                (table.longitude < 88.5) &
                (table.latitude > 26.0) &
                (table.latitude < 30.7)
            ).select(
                table.id,
                table.longitude,
                table.latitude,
                etable.elevation_metres,
                itable.station_id,
                ntable.name,
                left=(etable.on(table.id == etable.id),
                      db.climate_place_station_id.on(
                          table.id == itable.id),
                      db.climate_place_station_name.on(
                          table.id == ntable.id))
            ):
                place_ids[place_row.climate_place.id] = place_row
            for place_id, value in zip(keys, values):
                place = place_ids[place_id]
                write(",".join(
                    map(str,
                        (place.climate_place.latitude,
                         place.climate_place.longitude,
                         place.climate_place_station_id.station_id or "",
                         place.climate_place_station_name.name or "",
                         place.climate_place_elevation.elevation_metres
                             or "",
                         round_to_4_sd(value)))))
                write("\n")
        except:
            # Remove the partial file so a broken cache entry is not served.
            if csv_data_file is not None:
                csv_data_file.close()
                os.unlink(file_path)
            raise
        else:
            csv_data_file.close()

    return get_cached_or_generated_file(
        hashlib.md5(understood_expression_string).hexdigest() + ".csv",
        generate_map_csv_data)
def generate_chart(file_path):
    """Render the configured time series as a PNG chart at file_path.

    Simpler variant of the charting closure: no regression lines, no
    watermark. Reads specs, width, height, DSL, self.R / self.robjects,
    month_number_to_year_month, units_in_out and MeaninglessUnitsException
    from the enclosing scope. For each spec it fetches grouped values
    from R (monthly, or yearly when the query contains "Months("),
    builds an R "ts" object, then plots all series with a legend.
    """
    time_serieses = []
    R = self.R
    c = R("c")
    spec_names = []
    for spec in specs:
        query_expression = spec["query_expression"]
        expression = DSL.parse(query_expression)
        understood_expression_string = str(expression)
        # The canonical expression string doubles as the legend label.
        spec_names.append(understood_expression_string)
        units = DSL.units(expression)
        unit_string = str(units)
        if units is None:
            # Dimensionally inconsistent expression: surface the DSL's
            # analysis output as the error message.
            analysis_strings = []
            def analysis_out(*things):
                analysis_strings.append("".join(map(str, things)))
            DSL.analysis(expression, analysis_out)
            raise MeaninglessUnitsException(
                "\n".join(analysis_strings))
        yearly_values = "Months(" in query_expression
        # Yearly series are grouped on the first month of each year.
        code = DSL.R_Code_for_values(
            expression,
            ["time_period",
             "(time_period - (time_period % 12))"][yearly_values],
            "place_id IN (%s)" % ",".join(map(str, spec["place_ids"])))
        values_by_time_period_data_frame = R(code)()
        data = {}
        if isinstance(values_by_time_period_data_frame,
                      self.robjects.vectors.StrVector):
            # R returned an error string instead of a data frame.
            raise Exception(str(values_by_time_period_data_frame))
        elif values_by_time_period_data_frame.ncol == 0:
            # No data for this spec.
            pass
        else:
            add = data.__setitem__
            for key, value in zip(
                values_by_time_period_data_frame.rx2("key"),
                values_by_time_period_data_frame.rx2("value")):
                add(key, value)
            # assume monthly values and monthly time_period
            start_month_number = min(data.iterkeys())
            start_year, start_month = month_number_to_year_month(
                start_month_number)
            end_month_number = max(data.iterkeys())
            end_year, end_month = month_number_to_year_month(
                end_month_number)
            try:
                # Map stored units to display units (only Kelvin->Celsius
                # is converted; anything else passes through unchanged).
                display_units = {
                    "Kelvin": "Celsius",
                }[unit_string]
            except KeyError:
                converter = lambda x: x
                display_units = unit_string
            else:
                converter = units_in_out[display_units]["out"]
            # Fill gaps with None so the R ts is evenly spaced.
            values = []
            for month_number in range(start_month_number,
                                      end_month_number + 1,
                                      [1, 12][yearly_values]):
                if not data.has_key(month_number):
                    values.append(None)
                else:
                    values.append(converter(data[month_number]))
            if yearly_values:
                # NOTE(review): start/end carry a month component even
                # though frequency=1 — confirm R interprets this as
                # intended for yearly series.
                time_serieses.append(
                    R("ts")(self.robjects.FloatVector(values),
                            start=c(start_year, start_month),
                            end=c(end_year, end_month),
                            frequency=1))
            else:
                time_serieses.append(
                    R("ts")(self.robjects.FloatVector(values),
                            start=c(start_year, start_month),
                            end=c(end_year, end_month),
                            frequency=12))
    # Open the PNG graphics device; dimensions come from enclosing scope.
    R(("png(filename = '%(file_path)s', "
       "width = %(width)i, "
       "height = %(height)i"
       ")") % dict(file_path=file_path,
                   width=width,
                   height=height))
    # NOTE(review): yearly_values (and display_units) leak from the last
    # loop iteration here, so the plot type reflects only the final spec.
    plot_chart = R(
        "function (xlab, ylab, n, names, ...) {"
        "par(xpd = T, mar=par()$mar+c(0,0,4,0))\n"
        "ts.plot(...,"
        "gpars = list(xlab=xlab, ylab=ylab, bg=c(1:n), pch=c(21:25), type='%(plot_type)s')"
        ")\n"
        "legend("
        "par()$usr[1],"
        "par()$usr[4]+4,"
        "names,"
        "cex=0.8, pt.bg=c(1:n), pch=c(21:25), bty='n'"
        ")\n"
        "}" % dict(plot_type="lo"[yearly_values]))
    plot_chart("Time", display_units, len(time_serieses),
               spec_names, *time_serieses)
    R("dev.off()")