return all_words  # NOTE(review): tail of a function whose `def` is above this chunk — body not visible here

if __name__ == "__main__":
    # Benchmark driver for the Boggle solver: times N solves of random
    # boards, then solves one known "best" board and reports score breakdown.
    board = Boggle()
    s = Solver()
    N = 300

    # Time N solves of freshly generated boards; ts collects per-iteration
    # durations because t0 is reset at the end of every loop pass.
    # (Loop grouping reconstructed from collapsed source — the average below
    # only makes sense with append/reset inside the loop.)
    t0 = perf_counter()
    ts = []
    for i in range(N):
        s.solve(board.gen())
        ts.append(perf_counter() - t0)
        t0 = perf_counter()
    print(f"Time to solve: {sum(ts)/len(ts)*1000:.1f}ms")

    # SOLVE BEST - EXPECTED ANSWER - 2945
    # Fixed reference board; score = sum of (len(word) - 3) over found words.
    t0 = perf_counter()
    sol = s.solve(["EASAN", "BLRME", "AUIEE", "STSNS", "NURIG"])
    print(sum(len(w) - 3 for w in sol), f"[{(perf_counter()-t0) * 1000:.1f}ms]")

    # Group found words by length and print counts per length,
    # then the 11-letter words specifically (raises KeyError if none found).
    by_length = {}
    for w in sol:
        by_length[len(w)] = by_length.get(len(w), []) + [w]
    print(", ".join([f"{i}: {len(v)}" for i, v in by_length.items()]))
    print(by_length[11])

    # `timer` is presumably a module-level Timer instance defined above;
    # `nits` looks like "number of iterations" — TODO confirm both.
    print(timer.report(nits=N, make_dict=1))
def render_data_ui(sql, metrics, show_back_to_all=False):
    """Query sensor measurements and render the main UI template.

    Runs *sql* (with its ``[date_filter]`` placeholder replaced according to
    the request's ``range`` arg), pivots the result per metric, and renders
    ``ui_main.html`` with latest readings, dygraph CSV history, and per-series
    color/style options.

    :param sql: SQL query text containing a ``[date_filter]`` placeholder.
    :param metrics: non-empty sequence of metric column names (e.g.
        'temperature'); each must appear in the query result's columns.
    :param show_back_to_all: passed through to the template to toggle a
        "back to all sensors" link.
    :return: the rendered HTML (result of ``render_template``).
    """
    timer = Timer()
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # an explicit raise would be safer, but would change the exception type.
    assert len(metrics) > 0, 'No metrics specified to render!'

    # Time range comes from the query string, falling back to the configured
    # default when absent or empty.
    time_range = request.args.get('range')
    if not time_range:
        time_range = config.default_timerange
    timer.reset('STARTING...')

    # Database URL is taken from the environment, defaulting to a local
    # mock SQLite file.
    db_name = os.getenv('RUUVI_DB', default='sqlite:///measurements-mock.db')
    # NOTE(review): lazy args (`logging.debug('... %s', db_name)`) would avoid
    # eager formatting when debug logging is off.
    logging.debug('Using database %s' % db_name)
    db = dataset.connect(db_name)

    # inject date filter into SQL query
    sql = sql.replace('[date_filter]', sql_date_filter(time_range))
    all_data = pd.read_sql(sql, con=db.engine)
    # make sure returned data is consistent with specified metrics
    assert all(metric in all_data.columns for metric in metrics), \
        'The data returned is not consistent with the specified metrics. ' \
        + 'Metrics: {metrics}. Data columns: {columns}'.format(metrics=metrics,
                                                               columns=list(all_data.columns))
    timer.report('query db into dataframe')

    # get names of ALL sensors to allow for consistent coloring, regardless of the sensors currently displayed
    all_sensors = [
        r['sensor_name'] for r in db.query(
            "SELECT DISTINCT sensor_name FROM measurements ORDER BY sensor_name"
        )
    ]
    # Stable sensor -> hex color mapping (same order as the sorted query).
    sensor_colors = {
        sensor: color
        for sensor, color in zip(all_sensors,
                                 n_pretty_hex_colors(len(all_sensors)))
    }

    # One wide frame per metric: index is timestamp 't', one column per sensor.
    data_per_metric = {
        metric: all_data.pivot_table(index='t',
                                     columns='sensor_name',
                                     values=metric)
        for metric in metrics
    }
    # Names of the sensors currently displayed.
    current_sensors = data_per_metric[metrics[0]].columns

    # determine whether to aggregate data to a day level
    # (based on the row count of the first metric's pivot only).
    aggregate_to_day_level = data_per_metric[
        metrics[0]].shape[0] > config.aggregate_daily_row_threshold

    # latest single data point for each metric
    # (iloc[-1] = last row of each sensor's series; assumes pivot is
    # time-ordered — TODO confirm ordering guarantee of the query).
    latest_data = []
    for sensor in current_sensors:
        record = {'sensor_name': sensor, 'color': sensor_colors[sensor]}
        record.update({
            metric: data_per_metric[metric][sensor].iloc[-1]
            for metric in metrics
        })
        latest_data.append(record)

    # history for each metric, CSV formatted to inject into dygraph
    def csv_data_for_metric(metric):
        # Day-level aggregation collapses rows via to_day_level before the
        # dygraph CSV conversion.
        if aggregate_to_day_level:
            return dataframe_for_dygraph(to_day_level(data_per_metric[metric]))
        else:
            return dataframe_for_dygraph(data_per_metric[metric])

    graph_data = [{
        'metric': metric,
        'csv': csv_data_for_metric(metric)
    } for metric in metrics]

    # graph options per series are different when data is aggregated on a day level - then we need to
    # differentiate the min and max series visually
    if aggregate_to_day_level:
        # Aggregated view has two series per sensor: 'max <name>' (solid) and
        # 'min <name>' (dashed via strokePattern), both in the sensor's color.
        series_options = {}
        for sensor_name in current_sensors:
            series_options.update(
                {'max ' + sensor_name: {
                    'color': sensor_colors[sensor_name]
                }})
            series_options.update({
                'min ' + sensor_name: {
                    'color': sensor_colors[sensor_name],
                    'strokePattern': [3, 3]
                }
            })
    else:
        series_options = {
            sensor_name: {
                'color': sensor_colors[sensor_name]
            }
            for sensor_name in current_sensors
        }
    timer.report('data transformation')

    rendered = render_template('ui_main.html',
                               metrics=metrics,
                               latest_data=latest_data,
                               graph_data=graph_data,
                               metric_units={
                                   'temperature': '°',
                                   'pressure': ' hPa',
                                   'humidity': '%'
                               },
                               metric_icons={
                                   'temperature': 'thermometer',
                                   'pressure': 'weather-lightning-rainy',
                                   'humidity': 'water'
                               },
                               ui_width=480,
                               show_back_to_all=show_back_to_all,
                               time_range=time_range,
                               series_options=series_options)
    timer.report('template rendered')
    return rendered