Example #1
def results_home(request):
    from Results.csv_generator import SUMMARY_FILE_NAME
    path_ex = workspace_path(scenario_filename() +"/*.csv")
    start = workspace_path()
    context = {'supplemental_files': [os.path.relpath(file_path, start=start) for file_path in glob(path_ex)]}
    summary_path = os.path.join(scenario_filename(), SUMMARY_FILE_NAME)
    try:
        context['supplemental_files'].remove(summary_path)  # filter out summary.csv from the supplemental list
    except ValueError:
        pass
    context['summary_file_name'] = summary_path

    if os.path.exists(map_zip_file()):
        context['supplemental_files'].append(os.path.relpath(map_zip_file(), start=start))
    # TODO: value dict file sizes
    if DailyControls.objects.all().count() > 0:
        context['summary'] = Results.summary.summarize_results()
        context['iterations'] = len(list_of_iterations())
        context['population_eta'] = Unit.objects.count() / 650  # estimate slow map calc in matplotlib
        try:
            v = ResultsVersion.objects.get()
            context['version_number'] = '.'.join([v.versionMajor, v.versionMinor, v.versionRelease])
        except Exception:  # more specific exceptions kept leaking through
            pass
    return render(request, 'Results/SimulationProgress.html', context)
Example #2
def population_results_map():
    fig, ax = pyplot.subplots(subplot_kw=dict(axisbg='#DDDDDD'), figsize=(60,52), frameon=True)  # Issue #168 aspect ratio doesn't adjust currently
    pyplot.tight_layout()
    ax.autoscale_view('tight')
    ax.grid(color='white', linestyle='solid')
    rstyle(ax)
    ax.set_title("Population Locations and IDs", size=20)

    queryset = Unit.objects.all()
    # It might be faster to request a flat value list and then construct new tuples based on that
    latlong = [(u.latitude, u.longitude, 
                u.unitstats.cumulative_infected, 
                u.unitstats.cumulative_vaccinated,
                u.unitstats.cumulative_destroyed,
                u.unitstats.cumulative_zone_focus, 
                u.initial_size,
                "%s %s %i" % (u.production_type.name, u.user_notes, u.unitstats.cumulative_destroyed) 
                ) for u in queryset]
    total_iterations = float(len(list_of_iterations()))  # This is slower but more accurate than OutputSettings[0].iterations
    latitude, longitude, infected, vaccinated, destroyed, zone_focus, herd_size, names = zip(*latlong)
    zone_blues, red_infected, green_vaccinated = define_color_mappings()
    
    graph_zones(ax, latitude, longitude, total_iterations, zone_blues, zone_focus)
    graph_states(ax, latitude, longitude, total_iterations, infected, vaccinated, destroyed)
    
    neutral = [entry for entry in latlong if not any(x > 0 for x in (entry[2], entry[3], entry[4]))]
    neutral_longitude = [entry[1] for entry in neutral]
    neutral_latitude = [entry[0] for entry in neutral]
    neutral_sizes = [entry[6] for entry in neutral]  # keep sizes aligned with the filtered points
    # to ensure zero occurrences has a different color
    uninvolved = ax.scatter(neutral_longitude,
                            neutral_latitude,
                            marker='s',
                            s=[max(1, size // 10) for size in neutral_sizes],
                            color=(0.6, 0.6, 0.6, 1.0),
                            zorder=1000)
    return fig
Example #3
def simulation_status(request):
    output_settings = OutputSettings.objects.get()
    session = SmSession.objects.get()  # fetch the session once instead of querying it twice
    status = {
        'is_simulation_stopped': is_simulation_stopped(),
        'simulation_has_started': session.simulation_has_started,
        'iterations_total': output_settings.iterations,
        'iterations_started': len(list_of_iterations()),
        'iterations_completed': iterations_complete(),
        'iteration_text': mark_safe(session.iteration_text),
    }
    return JsonResponse(status)
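This view is meant for client-side polling; the keys of the serialized object are exactly those built in the dict above. A minimal sketch of the payload shape, with purely illustrative values rather than real simulation output:

# Hypothetical example of the dict that simulation_status() serializes to JSON;
# the values below are made up for illustration only.
example_status = {
    'is_simulation_stopped': False,
    'simulation_has_started': True,
    'iterations_total': 10,
    'iterations_started': 4,
    'iterations_completed': 3,
    'iteration_text': '',
}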
Example #4
def results_home(request):
    path_ex = os.path.join("workspace", scenario_filename(), "*.csv")
    context = {'supplemental_files': [os.path.relpath(file_path, start="workspace") for file_path in glob(path_ex)]}
    if os.path.exists(map_zip_file()):
        context['supplemental_files'].append(os.path.relpath(map_zip_file(), start="workspace"))
    # TODO: value dict file sizes
    if DailyControls.objects.all().count() > 0:
        context['summary'] = Results.summary.summarize_results()
        context['iterations'] = len(list_of_iterations())
        context['large_population'] = Unit.objects.count() > 10000  # determines slower interactive map vs fast matplotlib
        v = ResultsVersion.objects.get()
        context['version_number'] = '.'.join([v.versionMajor, v.versionMinor, v.versionRelease])
    return render(request, 'Results/SimulationProgress.html', context)
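For context on the path handling in Examples #1 and #4: os.path.relpath simply strips the workspace prefix from each globbed file, so the template receives scenario-relative paths. A self-contained illustration with made-up paths:

import os

# Hypothetical layout: workspace/scenario1/iteration_1.csv
file_path = os.path.join("workspace", "scenario1", "iteration_1.csv")
print(os.path.relpath(file_path, start="workspace"))  # -> 'scenario1/iteration_1.csv' ('\\' separator on Windows)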
Example #5
def construct_iterating_combination_filter_dictionary(iteration, model, zone=''):
    """Truth table of iterate_pt and iterate_zone gives you four values.  One for each Daily Model.
    Whether or not iteration is specified raises it to 8 combinations of possible filters used."""
    iterate_pt, iterate_zone = {DailyByProductionType: (True, False),
                                DailyByZoneAndProductionType: (True, True),
                                DailyByZone: (False, True),
                                DailyControls: (False, False)}[model]
    production_types, zones = breakdown_dictionary(iterate_pt, iterate_zone)
    active_zone = Zone.objects.all().order_by('-radius').first()  # the only "summary" zone stats to be displayed
    if zone:
        active_zone = Zone.objects.filter(name=zone).first()
    filter_sequence = []
    columns = ['Day']
    if iteration:  # 1__ Break down lines by production type for only one iteration
        if production_types:  # 11_
            columns += list(production_types.keys())
            if iterate_zone:  # 111
                # This is the one case where we use the 'zone' parameter because there's too much information otherwise
                for name in production_types.keys():  # add one for each Production Type and "All"
                    filter_sequence.append({'iteration': iteration,
                                            'production_type_id': production_types[name],
                                            'zone': active_zone})
            else:  # 110
                for name in production_types.keys():  # add one for each Production Type and "All"
                    filter_sequence.append({'iteration': iteration, 'production_type_id': production_types[name]})
                
        else:  # 10_
            if iterate_zone:  # 101 specific iteration of DailyByZone
                columns += list(zones.keys())
                for zone_pk in zones.values():  # add one for each Zone and "Background"
                    filter_sequence.append({'iteration': iteration, 'zone_id': zone_pk})
            else:  # 100 specific iteration of DailyControl field
                columns.append("Field")
                filter_sequence.append({'iteration': iteration})
            
    else:  # 0__ This is a summary time plot of all iterations.
        columns += ["Iteration " + str(it) for it in list_of_iterations()]  # columns names are always the same,
        # Different graph settings still only get to inject one time line graph per iteration
        if production_types:  # 01_ "All Production Type"
            if iterate_zone:  # 011 "All Production Type" and active_zone
                filter_sequence.append({'production_type': None, 'zone': active_zone})
            else:  # 010
                filter_sequence.append({'production_type': None})
            
        else:  # 00_
            if iterate_zone:  # 001 DailyByZone
                filter_sequence.append({'zone': active_zone})
            else:  # 000 DailyControls
                filter_sequence.append({})  

    return filter_sequence, columns
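The numeric branch labels (111, 101, 000, ...) read straight off the truth table in the dispatch dict at the top of the function. A small self-contained sketch of that labeling, using stand-in model names instead of the real Django classes:

# Stand-in illustration of the truth-table labels used in the comments above.
# The string keys are placeholders for the four Daily* model classes.
DISPATCH = {
    'DailyByProductionType':        (True,  False),
    'DailyByZoneAndProductionType': (True,  True),
    'DailyByZone':                  (False, True),
    'DailyControls':                (False, False),
}

def case_label(model_name, iteration=None):
    """Three bits: iteration given?, production types iterated?, zones iterated?"""
    iterate_pt, iterate_zone = DISPATCH[model_name]
    return ''.join(str(int(flag)) for flag in (bool(iteration), iterate_pt, iterate_zone))

print(case_label('DailyByZone', iteration=3))  # '101' -> one filter per zone for that iteration
print(case_label('DailyControls'))             # '000' -> one empty filter, summary of all iterations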
Example #6
def create_time_series_lines(field_name, model, iteration=None, zone=''):
    filter_sequence, columns = construct_iterating_combination_filter_dictionary(iteration, model, zone=zone)
    lines = [] 
    if iteration:  # Manually step through each query for a single iteration
        for filter_dict in filter_sequence:
            lines.append(list(model.objects.filter(**filter_dict).order_by('day').values_list(field_name, flat=True)))
    else:  # summary of all iterations is a single query for performance reasons
        max_size = model.objects.all().aggregate(Max('day'))['day__max']
        for row in range(len(list_of_iterations())):  # iteration = index
            lines.append([None] * max_size)
        objs = model.objects.filter(**filter_sequence[0]).values_list('iteration', 'day', field_name)
        for entry in objs:
            lines[int(entry[0])-1][entry[1]-1] = entry[2]  # sorting done in python, iteration and day are both 1 indexed

    return lines, columns
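The summary branch builds one list per iteration and drops each (iteration, day, value) tuple into place; both indexes are 1-based in the database, hence the -1 offsets. A tiny standalone illustration with hypothetical query output:

# Hypothetical values_list('iteration', 'day', field_name) output for 2 iterations over 3 days.
objs = [(1, 1, 5), (1, 2, 7), (2, 1, 3), (2, 3, 9)]
max_size = 3
lines = [[None] * max_size for _ in range(2)]
for iteration, day, value in objs:
    lines[iteration - 1][day - 1] = value   # shift 1-based DB indexes to 0-based list indexes
print(lines)  # [[5, 7, None], [3, None, 9]] -- days with no record stay None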
Example #7
def result_table(request, model_name, model_class, model_form, graph_links=False, prefix=''):
    """Displays a table with links to all possible graphs of each field, for every iteration.
       Issue #127"""
    ResultSet = modelformset_factory(model_class, extra=0, form=model_form)
    iterations = list_of_iterations()
    context = {'title': 'Results from %i Iterations' % len(iterations)}
    if not graph_links:  # Old behavior with a number table, only first 50 entries, useful for debug
        context['formset'] = ResultSet(queryset=model_class.objects.all().order_by('iteration', 'day')[:50])
        return render(request, 'Results/FormSet.html', context)
    else:  # New Behavior with links to a graph for every field
        context['formset'] = ResultSet(queryset=model_class.objects.all().order_by('iteration', 'day')[:5])
        context['Zones'] = Zone.objects.all()
        context['iterations'] = iterations[:5]  # Only link the first 5 iterations; there can be thousands, so more links are pointless
        context['model_name'] = model_name
        context['excluded_fields'] = ['zone', 'production_type', 'day', 'iteration', 'id', 'pk', 'last_day']
        context['excluded_fields'] += [field for field in model_class._meta.get_all_field_names() if not field.startswith(prefix)]
        context['empty_fields'] = empty_fields(model_class, context['excluded_fields'])
        context['headers'] = class_specific_headers(model_name, prefix)
        return render(request, 'Results/GraphLinks.html', context)
Example #8
def population_results_map():
    """Creates a map that summarizes the range of outcomes amongst all iterations of a simulation.
    Estimated time = Unit.objects.count() / 650 in seconds.   """
    start_time = time()
    fig = Figure(figsize=(60, 52), frameon=True, tight_layout=True)
    ax = fig.add_subplot(1,1,1, axisbg='#EEEEEE')
    ax.grid(color='white', linestyle='solid')
    rstyle(ax)

    queryset = Unit.objects.all()
    # It might be faster to request a flat value list and then construct new tuples based on that
    latlong = [(u.latitude, u.longitude, 
                u.unitstats.cumulative_infected, 
                u.unitstats.cumulative_vaccinated,
                u.unitstats.cumulative_destroyed,
                u.unitstats.cumulative_zone_focus, 
                u.initial_size,
                ) for u in queryset]
    total_iterations = float(len(list_of_iterations()))
    latitude, longitude, infected, vaccinated, destroyed, zone_focus, herd_size = zip(*latlong)
    zone_blues, red_infected, green_vaccinated = define_color_mappings()
    
    graph_zones(ax, latitude, longitude, total_iterations, zone_blues, zone_focus)
    graph_states(ax, latitude, longitude, total_iterations, infected, vaccinated, destroyed)
    
    neutral = [entry for entry in latlong if not any(x > 0 for x in (entry[2], entry[3], entry[4]))]
    neutral_longitude = [entry[1] for entry in neutral]
    neutral_latitude = [entry[0] for entry in neutral]
    neutral_sizes = [entry[6] for entry in neutral]  # keep sizes aligned with the filtered points
    # to ensure zero occurrences has a different color
    uninvolved = ax.scatter(neutral_longitude,
                            neutral_latitude,
                            marker='s',
                            s=[min(max(0.25, size / 100), 1000) for size in neutral_sizes],
                            color=(0.2, 0.2, 0.2, 1.0),
                            zorder=1000)
    Results.graphing.crop_to_fit_map(ax)
    print("Population Map took %i seconds" % int(time() - start_time))
    return fig
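population_results_map() returns a bare matplotlib Figure; rendering it to an image is left to the caller. A sketch of one way to do that with matplotlib's Agg backend (the Django response wiring is an assumption, not taken from this project):

from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg

def figure_to_png_bytes(fig):
    """Rasterize a Figure to PNG bytes without needing an interactive backend."""
    canvas = FigureCanvasAgg(fig)
    buffer = BytesIO()
    canvas.print_png(buffer)
    return buffer.getvalue()

# Hypothetical use in a view:
#   png = figure_to_png_bytes(population_results_map())
#   return HttpResponse(png, content_type='image/png')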
Example #9
def graph_field_png(request, model_name, field_name, iteration='', zone=''):
    model = globals()[model_name]
    iteration = int(iteration) if iteration else None
    lines, columns = create_time_series_lines(field_name, model, iteration=iteration, zone=zone)

    time_series = extend_last_day_lines(lines, model, field_name)
    
    explanation, title = construct_title(field_name, iteration, model, zone)

    use_legend = bool(iteration)

    boxplot_graph, fig, gs, time_graph = create_figure_with_boxplot(title, use_legend)

    boxplot_raw = collect_boxplot_data(time_series, explanation)  # This has already been padded
    boxplot_data = pd.DataFrame(pd.Series(boxplot_raw), columns=['Last Day'] if field_is_cumulative(explanation) else ['Distribution'])
    boxplot_data.boxplot(ax=boxplot_graph, return_type='axes')

    if iteration:  # for a single iteration, we don't need all the hist2d prep
        return single_iteration_line_graph(iteration, field_name, model_name, model, time_series, columns, time_graph, boxplot_graph, fig)
    if len(list_of_iterations()) < 50:
        # do a stacked line graph instead of a histogram    
        return single_iteration_line_graph(iteration, field_name, model_name, model, time_series, columns, time_graph, boxplot_graph, fig)

    return TwoD_histogram(fig, gs, time_graph, time_series)