Example #1
0
def _get_dl_prob_avindex(region_id, year='2018-19'):
    """Collect all the data needed for one region to make the plots.

    :param region_id:       [int] Region ID is an int as given in ForecastRegionKDV
    :param year:            [string] Season, e.g. '2018-19'
    :return problems, dangers, aval_indexes:
    """

    all_observations = gvp.get_all_observations(year,
                                                output='FlatList',
                                                geohazard_tids=10)
    all_forecasts = gvp.get_all_forecasts(year)

    # Keep only the data belonging to the requested region.
    observations = [o for o in all_observations
                    if o.ForecastRegionTID == region_id]
    forecasts = [f for f in all_forecasts if f.region_id == region_id]

    aval_indexes = gm.get_avalanche_index(observations)

    # Danger levels: forecasts with a real danger level plus the
    # observed avalanche-evaluation forms.
    dangers_raw = [f for f in forecasts if f.danger_level > 0]
    dangers_raw += [o for o in observations
                    if isinstance(o, (go.AvalancheEvaluation,
                                      go.AvalancheEvaluation2,
                                      go.AvalancheEvaluation3))]
    dangers = gd.make_dangers_conform_from_list(dangers_raw)

    # Problems: forecasts with a real danger level plus evaluation and
    # problem forms (note: AvalancheEvalProblem2, not AvalancheEvaluation3).
    problems_raw = [f for f in forecasts if f.danger_level > 0]
    problems_raw += [o for o in observations
                     if isinstance(o, (go.AvalancheEvaluation,
                                       go.AvalancheEvaluation2,
                                       go.AvalancheEvalProblem2))]
    problems = gp.make_problems_conform_from_list(problems_raw)

    return problems, dangers, aval_indexes
Example #2
0
def _get_raw_varsom(year, date, days, max_file_age=23):
    """Fetch raw varsom avalanche warnings.

    If a date is given, a window of ``days + 1`` days ending at that date is
    requested directly from the API and filtered to warnings with a real
    danger level. Otherwise the whole season is read (possibly from cache).

    :param year:         [string] Season, e.g. '2018-19'. Used when no date is given.
    :param date:         [date]   End of the requested window, or falsy for whole season.
    :param days:         [int]    Number of days before the date to include.
    :param max_file_age: [int]    Max age of the cached season file.
    :return:             list of avalanche warnings
    """
    if not date:
        return gvp.get_all_forecasts(year=year, max_file_age=max_file_age)

    season = gm.get_season_from_date(date)
    regions = gm.get_forecast_regions(year=season, get_b_regions=True)
    from_date = date - dt.timedelta(days=days + 1)
    warnings = gf.get_avalanche_warnings(regions, from_date, date)
    # Drop placeholder warnings without an actual danger level.
    return [w for w in warnings if w.danger_level > 0]
def get_season_raek(season='2018-19'):
    """Requests all forecasts (danger levels and problems) from the forecast api and writes to .csv file.

    :param season: [string] Eg. '2019-20'. If parameter is not 7 char it will not make the csv.
    """

    if len(season) != 7:
        lg.warning(
            'avalanchewarningscomplete.py -> get_season_raek: season parameter ist not the expected length.'
        )
        return

    warnings = gvp.get_all_forecasts(year=season)
    # One dict per warning becomes one row in the csv.
    df = pandas.DataFrame([w.to_dict() for w in warnings])
    file_and_folder = '{0}norwegian_avalanche_warnings_season_{1}_{2}.csv'.format(
        se.local_storage, season[2:4], season[5:7])
    df.to_csv(file_and_folder, index_label='index')
Example #4
0
def make_forecaster_data(year):
    """For one season, make the forecaster dictionary with all the necessary
    data and write it to a ';'-separated follow-up file.

    :param year:    [string] Eg. season '2018-19'
    """
    import csv

    # The data
    all_warnings = gvp.get_all_forecasts(year, max_file_age=23)
    all_observation_forms = gvp.get_all_observations(year,
                                                     geohazard_tids=10,
                                                     max_file_age=23)

    forecaster_data = {}

    # Group warnings by author; create the Forecaster on first sight so
    # add_warning is written only once (the original duplicated it in
    # both branches).
    for w in all_warnings:
        if w.author not in forecaster_data:
            forecaster_data[w.author] = Forecaster(w.author)
        forecaster_data[w.author].add_warning(w)

    # Only observations made by known forecasters are of interest.
    for o in all_observation_forms:
        if o.NickName in forecaster_data:
            forecaster_data[o.NickName].add_observation(o)

    forecaster_list_of_dict = [v.to_dict() for v in forecaster_data.values()]

    if not forecaster_list_of_dict:
        # No warnings this season; writing would raise IndexError on [0].
        return

    # newline='' is required by the csv module so that no extra blank
    # lines are inserted on Windows.
    with open('{0}forecaster_followup.txt'.format(env.output_folder),
              'w',
              newline='',
              encoding='utf8') as f:
        dict_writer = csv.DictWriter(
            f, delimiter=';', fieldnames=forecaster_list_of_dict[0].keys())
        dict_writer.writeheader()
        dict_writer.writerows(forecaster_list_of_dict)

    return
Example #5
0
def _plot_seasons_avalanche_problems(year, file_name_prefix, problem_ids,
                                     title_prefix, output_folder):
    """Supporting method for plotting one or more avalanche problems for all regions for one season.

    :param year:                [string]    Season as eg '2018-19'
    :param file_name_prefix:    [string]    File name. Year is added to string.
    :param problem_ids:         [list of int] One or more avalanche problem type id's as list.
    :param title_prefix:        [string]    title in figure. Year is added to the string.
    :param output_folder:       [string]    Full path.
    :return:
    """

    file_name = '{0} {1}'.format(file_name_prefix, year)
    forecasts = gvp.get_all_forecasts(year=year)

    sorted_region_ids = _region_by_region_type(forecasts)

    # One AvalancheProblemPixel per forecast: position and colour of
    # every pixel in the plot.
    pixels = [AvalancheProblemPixel(f, sorted_region_ids, problem_ids)
              for f in forecasts]

    # Start plotting
    fig, ax = plt.subplots(1, 1, figsize=(16, 7))

    # Left y-axis labels
    region_names = [gm.get_forecast_region_name(region_id)
                    for region_id in sorted_region_ids]
    plt.ylim(len(sorted_region_ids) - 1, -1)
    plt.yticks(range(len(sorted_region_ids) + 1), region_names)

    # x-axis labels
    axis_dates, axis_positions = _axis_date_labels_from_year(year)
    plt.xticks(axis_positions, axis_dates)

    # Each pixel is drawn as a short, thick horizontal line.
    for pixel in pixels:
        plt.hlines(pixel.y,
                   pixel.x,
                   pixel.x + 1.2,
                   lw=15,
                   color=pixel.colour,
                   alpha=pixel.alpha)

    plt.grid(True, ls='--', lw=.5, c='k', alpha=.3)  # add grid lines
    ax.tick_params(axis=u'both', which=u'both', length=0)  # turn off ticks
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)  # turn off black frame in plot

    plt.title('{0} {1}'.format(title_prefix, year))

    # When is the figure made?
    plt.gcf().text(0.77,
                   0.02,
                   'Figur laget {0:%Y-%m-%d %H:%M}'.format(dt.datetime.now()),
                   color='0.5')

    # This saves the figure to file
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    plt.savefig(u'{0}{1}'.format(output_folder, file_name))
    plt.close(fig)
Example #6
0
def plot_seasons_forecasted_danger_level(year='2019-20',
                                         output_folder=env.plot_folder +
                                         'regobsplots/'):
    """All forecasted danger levels for all regions are plotted in one figure.

    The figure is written to ``output_folder`` (created if missing) under the
    name 'All danger levels <year>'.

    :param year:                [string]    Season as eg '2018-19'
    :param output_folder:       [string]    Full path.
    :return:
    """

    file_name = 'All danger levels {0}'.format(year)
    all_forecasts = gvp.get_all_forecasts(year=year)
    regions_sorted_list = _region_by_region_type(all_forecasts)

    # dlp is for DangerLevelPixel. This list contains positions and colours of all pixels in the plot.
    list_of_dlp = [
        DangerLevelPixel(f, regions_sorted_list) for f in all_forecasts
    ]

    # Start plotting
    fsize = (16, 7)
    fig, ax = plt.subplots(1, 1, figsize=fsize)

    # Left y-axis labels
    region_names_sorted_list = [
        gm.get_forecast_region_name(i) for i in regions_sorted_list
    ]
    plt.ylim(len(regions_sorted_list) - 1, -1)
    plt.yticks(range(len(regions_sorted_list) + 1), region_names_sorted_list)

    # x-axis labels
    axis_dates, axis_positions = _axis_date_labels_from_year(year)
    plt.xticks(axis_positions, axis_dates)

    # plot lines and left and bottom ticks
    for dlp in list_of_dlp:
        plt.hlines(dlp.y, dlp.x, dlp.x + 1.2, lw=15, color=dlp.colour)

    plt.grid(True, ls='--', lw=.5, c='k', alpha=.3)  # add grid lines
    ax.tick_params(axis=u'both', which=u'both', length=0)  # turn off ticks
    ax.spines['top'].set_visible(False)  # turn off black frame in plot
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)

    title = 'Alle faregrader varslet sesongen {0}'.format(year)
    plt.title(title)

    # When is the figure made?
    plt.gcf().text(0.77,
                   0.02,
                   'Figur laget {0:%Y-%m-%d %H:%M}'.format(dt.datetime.now()),
                   color='0.5')

    # This saves the figure to file
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    plt.savefig(u'{0}{1}'.format(output_folder, file_name))
    plt.close(fig)
Example #7
0
    def __init__(self, regobs_types,
                 seasons=('2017-18', '2018-19', '2019-20')):
        """
        Object contains aggregated data used to generate labeled datasets.

        Each forecast is flattened into one row holding metadata, weather
        attributes, problem attributes and regobs attributes, plus an
        OrderedDict of labels. Rows are stored in ``self.tree`` keyed by
        ``(region_id, date_valid)``.

        :param regobs_types: Tuple/list of string names for RegObs observation types to fetch.
        :param seasons: Tuple/list of string representations of avalanche seasons to fetch.
        """
        self.regobs_types = regobs_types
        # (region_id, date_valid) -> row dict; filled in the loop below.
        self.tree = {}

        aw = []
        raw_regobs = {}
        for season in seasons:
            # Forecasts for all seasons are concatenated into one list.
            aw += gvp.get_all_forecasts(year=season)
            regions = gm.get_forecast_regions(year=season, get_b_regions=True)
            # Merge each season's regobs observations into one dict.
            raw_regobs = {
                **raw_regobs,
                **_get_regobs_obs(regions, season, regobs_types)
            }

        for forecast in aw:

            row = {
                # Metadata
                'region_id':
                forecast.region_id,
                'region_name':
                forecast.region_name,
                'region_type':
                forecast.region_type_name,
                'date':
                forecast.date_valid,
                'danger_level':
                forecast.danger_level,
                # NOTE(review): this is 1.0 when emergency_warning equals
                # 'Ikke gitt' ("not given") — the polarity looks inverted
                # for a feature named 'emergency_warning'; confirm intent.
                'emergency_warning':
                float(forecast.emergency_warning == 'Ikke gitt')
            }

            label = OrderedDict({})
            label[('CLASS', _NONE, 'danger_level')] = forecast.danger_level
            label[('CLASS', _NONE,
                   'emergency_warning')] = forecast.emergency_warning

            # Weather data. Unknown wind speeds map to 0 via WIND_SPEEDS.get.
            weather = {
                'precip_most_exposed':
                forecast.mountain_weather.precip_most_exposed,
                'precip':
                forecast.mountain_weather.precip_region,
                'wind_speed':
                WIND_SPEEDS.get(forecast.mountain_weather.wind_speed, 0),
                'wind_change_speed':
                WIND_SPEEDS.get(forecast.mountain_weather.change_wind_speed,
                                0),
                'temp_min':
                forecast.mountain_weather.temperature_min,
                'temp_max':
                forecast.mountain_weather.temperature_max,
                'temp_lev':
                forecast.mountain_weather.temperature_elevation,
                'temp_freeze_lev':
                forecast.mountain_weather.freezing_level,
            }

            # We use multiple loops to get associated values near each other in e.g. .csv-files.
            for wind_dir in DIRECTIONS:
                weather[f"wind_dir_{wind_dir}"] = float(
                    forecast.mountain_weather.wind_direction == wind_dir)
            for wind_dir in DIRECTIONS:
                weather[f"wind_chg_dir_{wind_dir}"] = float(
                    forecast.mountain_weather.change_wind_direction ==
                    wind_dir)
            hours = [0, 6, 12, 18]
            for h in hours:
                weather[f"wind_chg_start_{h}"] = float(
                    forecast.mountain_weather.change_hour_of_day_start == h)
            # NOTE(review): this loop reads change_hour_of_day_start again,
            # identical to the wind_chg_start loop above — looks like a
            # copy-paste; a freezing-level start-hour field was probably
            # intended. Confirm against MountainWeather.
            for h in hours:
                weather[f"temp_fl_start_{h}"] = float(
                    forecast.mountain_weather.change_hour_of_day_start == h)
            row['weather'] = weather

            # Problem data
            prb = {}
            # Forecast problem ids mapped to canonical names; unknown -> _NONE.
            problem_types = [
                PROBLEMS.get(p.avalanche_problem_type_id, _NONE)
                for p in forecast.avalanche_problems
            ]
            problems = {}
            prb['problem_amount'] = len(forecast.avalanche_problems)
            label[('CLASS', _NONE, 'problem_amount')] = prb['problem_amount']
            # Default the three problem slots to _NONE; filled below when present.
            for i in range(1, 4):
                label[('CLASS', _NONE, f"problem_{i}")] = _NONE
            for problem in PROBLEMS.values():
                if problem in problem_types:
                    index = problem_types.index(problem)
                    problems[problem] = forecast.avalanche_problems[index]
                    # Maps avalanche_problem_id 1..3 to priority value 3..1
                    # (problem 1 gets the largest value) — presumably; confirm.
                    prb[f"problem_{problem}"] = -(
                        problems[problem].avalanche_problem_id - 4)
                    label[('CLASS', _NONE, f"problem_{index + 1}")] = problem
                else:
                    # Absent problems get an empty placeholder so attribute
                    # access below is uniform.
                    problems[problem] = gf.AvalancheWarningProblem()
                    prb[f"problem_{problem}"] = 0
            for problem in PROBLEMS.values():
                p_data = problems[problem]
                forecast_cause = CAUSES.get(p_data.aval_cause_id, _NONE)
                # 1-hot encoding of the cause over all known causes.
                for cause in CAUSES.values():
                    prb[f"problem_{problem}_cause_{cause}"] = float(
                        forecast_cause == cause)
                prb[f"problem_{problem}_dsize"] = p_data.destructive_size_ext_id
                prb[f"problem_{problem}_prob"] = p_data.aval_probability_id
                # Collapse trigger ids to an ordinal scale; unknown ids -> 0.
                prb[f"problem_{problem}_trig"] = {
                    10: 0,
                    21: 1,
                    22: 2
                }.get(p_data.aval_trigger_simple_id, 0)
                prb[f"problem_{problem}_dist"] = p_data.aval_distribution_id
                prb[f"problem_{problem}_lev_max"] = p_data.exposed_height_1
                prb[f"problem_{problem}_lev_min"] = p_data.exposed_height_2

                label[('CLASS', problem, "cause")] = forecast_cause
                label[('CLASS', problem,
                       "dsize")] = p_data.destructive_size_ext_id
                label[('CLASS', problem, "prob")] = p_data.aval_probability_id
                label[('CLASS', problem,
                       "trig")] = p_data.aval_trigger_simple_id
                label[('CLASS', problem, "dist")] = p_data.aval_distribution_id
                label[('CLASS', problem,
                       "lev_fill")] = p_data.exposed_height_fill

                for n in range(1, 5):
                    prb[f"problem_{problem}_lev_fill{n}"] = float(
                        p_data.exposed_height_fill == n)
                # valid_expositions is a string of 8 '0'/'1' flags, one per direction.
                for n in range(0, 8):
                    aspect_attr_name = f"problem_{problem}_aspect_{DIRECTIONS[n]}"
                    prb[aspect_attr_name] = float(p_data.valid_expositions[n])
                label[('MULTI', problem,
                       "aspect")] = p_data.valid_expositions.zfill(8)
                label[('REAL', problem, "lev_max")] = p_data.exposed_height_1
                label[('REAL', problem, "lev_min")] = p_data.exposed_height_2

                # Check for consistency
                # NOTE(review): 'continue' is the last statement of this loop
                # body, so this check currently has no effect — the row is
                # kept either way. Confirm whether the forecast was meant to
                # be skipped instead.
                if prb[f"problem_{problem}_lev_min"] > prb[
                        f"problem_{problem}_lev_max"]:
                    continue

            row['problems'] = prb
            row['label'] = label

            # RegObs data
            row['regobs'] = {}
            current_regobs = raw_regobs.get(
                (forecast.region_id, forecast.date_valid), {})
            # Use 5 most competent observations, and list both categories as well as scalars
            for obs_idx in range(0, 5):
                # One type of observation (test, danger signs etc.) at a time
                for regobs_type in self.regobs_types:
                    obses = current_regobs[
                        regobs_type] if regobs_type in current_regobs else []
                    # Go through each requested class attribute from the specified observation type
                    for attr, cat in REGOBS_CLASSES[regobs_type].items():
                        # We handle categories using 1-hot, so we step through each category
                        for cat_name in cat.values():
                            attr_name = f"regobs_{REG_ENG[regobs_type]}_{_camel_to_snake(attr)}_{cat_name}_{obs_idx}"
                            # Missing observations pad with 0.
                            row['regobs'][attr_name] = obses[obs_idx][
                                cat_name] if len(obses) > obs_idx else 0
                    # Go through all requested scalars
                    for attr, (regobs_attr,
                               conv) in REGOBS_SCALARS[regobs_type].items():
                        attr_name = f"regobs_{REG_ENG[regobs_type]}_{_camel_to_snake(attr)}_{obs_idx}"
                        try:
                            row['regobs'][attr_name] = conv(
                                obses[obs_idx]
                                [regobs_attr]) if len(obses) > obs_idx else 0
                        except TypeError:
                            # Unconvertible (e.g. None) scalars default to 0.
                            row['regobs'][attr_name] = 0

            # Check for consistency
            # Rows with inconsistent temperatures are silently dropped.
            if weather['temp_min'] > weather['temp_max']:
                continue

            self.tree[(forecast.region_id, forecast.date_valid)] = row
Example #8
0
def make_forecaster_data_old(year):
    """For one season, make the forecaster dictionary with all the necessary data.

    :param year:    [string] Eg. season '2017-18'
    :return:        [dict] {author: Forecaster} with per-author statistics added.
    """

    # get all valid forecasts
    all_warnings = gvp.get_all_forecasts(year, max_file_age=100)

    # Make data set with dict {author: Forecaster}. Add warnings to Forecaster object.
    # Note: A list of all authors are all the keys in this dictionary.
    forecaster_dict = {}
    for w in all_warnings:
        if w.author not in forecaster_dict:
            forecaster_dict[w.author] = Forecaster(w.author)
        forecaster_dict[w.author].add_warning(w)

    # Season-wide totals, needed below for forecaster statistics.
    danger_levels_all = [w.danger_level for w in all_warnings]  # ints
    main_texts_all = [w.main_text for w in all_warnings]  # strings
    avalanche_dangers_all = [w.avalanche_danger for w in all_warnings]  # strings
    snow_surfaces_all = [w.snow_surface for w in all_warnings]  # strings
    current_weak_layers_all = [w.current_weak_layers for w in all_warnings]  # strings
    problems_pr_warnings_all = [len(w.avalanche_problems) for w in all_warnings]  # ints

    # Add data about the authors forecasts to forecaster objects in the dict
    for f in forecaster_dict.values():

        # add numbers of warnings made
        forecaster_dict[f.author].add_warnings_count(len(f.warnings))

        # find how many pr date valid
        dates_valid = {}
        for w in f.warnings:
            dates_valid[w.date_valid] = dates_valid.get(w.date_valid, 0) + 1
        forecaster_dict[f.author].add_dates_valid(dates_valid)

        # add data on the danger levels forecasted
        # Bug fix: the original built each dict but never appended it, so
        # the author's danger-level list was always passed in empty.
        danger_levels_author = [{
            'Date': w.date_valid,
            'Region': w.region_name,
            'DL': w.danger_level,
            'Danger level': w.danger_level
        } for w in f.warnings]
        forecaster_dict[f.author].add_danger_levels(danger_levels_author,
                                                    danger_levels_all)

        # add data on the main texts made
        main_texts_author = [w.main_text for w in f.warnings]
        forecaster_dict[f.author].add_main_texts(main_texts_author,
                                                 main_texts_all)

        # add data on the avalanche dangers made
        avalanche_dangers_author = [w.avalanche_danger for w in f.warnings]
        forecaster_dict[f.author].add_avalanche_dangers(
            avalanche_dangers_author, avalanche_dangers_all)

        # add data on the snow surfaces forecasted
        snow_surfaces_author = [w.snow_surface for w in f.warnings]
        forecaster_dict[f.author].add_snow_surfaces(snow_surfaces_author,
                                                    snow_surfaces_all)

        # add data on the current weak layers made
        current_weak_layers_author = [
            w.current_weak_layers for w in f.warnings
        ]
        forecaster_dict[f.author].add_current_weak_layers(
            current_weak_layers_author, current_weak_layers_all)

        # add data on the avalanche problems made
        problems_pr_warnings_author = [
            len(w.avalanche_problems) for w in f.warnings
        ]
        forecaster_dict[f.author].add_problems_pr_warnings(
            problems_pr_warnings_author, problems_pr_warnings_all)

    return forecaster_dict