def make_problems_for_BritSiv():
    """Brit Siv ønsket oversikt over varslede skredprobelemr og faregrader for Indre Fjordane of Fjordane
    de to siste årene (2015-2017).    """

    output_filename = '{0}Skredproblemer Indre Fjordane for BritSiv.csv'.format(
        env.output_folder)
    pickle_file_name = '{0}runavalancheproblems_britsiv.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Fjordane 2015-16
        region_id = 121
        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Indre Fjordane 2016-17
        region_id = 3027
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    all_problems = []
    for d in all_dangers:
        all_problems += d.avalanche_problems
    all_problems.sort(key=lambda p: p.date)

    _save_problems(all_problems, output_filename)


def get_all_ofoten():
    """Dangers and problems for Ofoten (formerly Narvik). Writes a .csv file."""

    get_new = True
    get_observations = False
    write_csv = True
    plot_dangerlevels_simple = False

    select_years = [
        '2012-13', '2013-14', '2014-15', '2015-16', '2016-17', '2017-18'
    ]
    region_id_Narvik = 114  # Narvik was used from 2012 until Nov 2016
    region_id_Ofoten = 3015  # Ofoten was introduced in November 2016

    warnings_pickle = '{0}allforecasteddangerlevels_Ofoten_201218.pickle'.format(
        env.local_storage)
    warnings_csv = '{0}Faregrader Ofoten 2012-18.csv'.format(env.output_folder)
    warnings_plot = '{0}Faregrader Ofoten 2012-18.png'.format(
        env.output_folder)

    if get_new:
        all_warnings = []
        all_evaluations = []

        for y in select_years:

            if y in ['2016-17', '2017-18']:
                region_id = region_id_Ofoten
            else:
                region_id = region_id_Narvik

            from_date, to_date = gm.get_forecast_dates(year=y)

            all_warnings += gd.get_forecasted_dangers(region_id, from_date,
                                                      to_date)
            if get_observations:
                all_evaluations += go.get_avalanche_evaluation_3(
                    from_date, to_date, region_id)

        mp.pickle_anything([all_warnings, all_evaluations], warnings_pickle)

    else:
        [all_warnings, all_evaluations] = mp.unpickle_anything(warnings_pickle)

    if write_csv:
        # write to csv files
        _save_danger_and_problem_to_file(all_warnings, warnings_csv)

    elif plot_dangerlevels_simple:
        # Make simple plot
        from_date = gm.get_forecast_dates(select_years[0])[0]
        to_date = gm.get_forecast_dates(select_years[-1])[1]
        _make_plot_dangerlevels_simple(all_warnings, all_evaluations,
                                       warnings_plot, from_date, to_date)

    else:
        print("No output selected")

    return all_warnings, all_evaluations
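

# Hedged usage sketch (not part of the original module): run the Ofoten export
# once and reuse the returned lists without downloading again.
def _example_ofoten_summary():
    all_warnings, all_evaluations = get_all_ofoten()
    print('{0} warnings and {1} evaluations for Ofoten/Narvik'.format(
        len(all_warnings), len(all_evaluations)))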


def make_forecasts_for_Christian():
    """Christian Jaedicke requested an overview of forecasts and avalanche
    problems for the last three years in Narvik."""

    pickle_file_name = '{0}forecasts_ofoten_christian.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Narvik 2014-15 and 2015-16
        region_id = 114

        from_date, to_date = gm.get_forecast_dates('2014-15')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Ofoten 2016-17
        region_id = 3015
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel Ofoten for Christian.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')
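

# The problem-CSV writer above recurs nearly verbatim in several functions below
# (for Heidi and for Espen at Sweco). A shared helper along these lines could
# replace those blocks. This is a sketch, not part of the original module; the
# columns mirror the OrderedDict used above.
def _write_problem_csv(all_dangers, file_path):
    """Write one row per avalanche problem in all_dangers to a ' ;'-separated file."""
    import collections as coll
    with open(file_path, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(' ;'.join([fe.make_str(k) for k in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(v) for v in out_data.values()]) + '\n')
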
def make_forecasts_for_Sander():
    """2018 August: Hei igjen Ragnar.
    Har du statistikk på varsla faregrad over ein heil sesong for Noreg? Eit snitt. XX dagar med faregrad 1,
    XX dagar med faregrad 2, XX dagar med faregrad 3.... fordelt på XX varslingsdagar.

    :return:
    """

    pickle_file_name = '{0}201808_avalanche_forecasts_sander.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17', '2017-18']
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            region_ids = gm.get_forecast_regions(y, get_b_regions=True)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201808 Faregrader for Sander.txt'.format(
        env.output_folder)

    import pandas as pd

    all_dangers_dict = []
    for a in all_dangers:
        all_dangers_dict.append(a.__dict__)

    col_names = list(all_dangers_dict[0].keys())
    all_dangers_df = pd.DataFrame(all_dangers_dict, columns=col_names)

    # Summarize days per danger level and write the result to file, as requested.
    # Assumption: danger_level is a plain attribute on the danger objects and
    # therefore a column in the DataFrame.
    danger_level_counts = all_dangers_df['danger_level'].value_counts().sort_index()
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        for level, count in danger_level_counts.items():
            f.write('Faregrad {0}: {1} dager\n'.format(level, count))
        f.write('Totalt: {0} varslingsdager\n'.format(len(all_dangers_df)))


def _axis_date_labels_from_year(year):
    """For a season (year) get labels for the first day in the month and positions on x axis."""
    axis_dates = []
    axis_positions = []
    from_date, to_date = gm.get_forecast_dates(year)

    for i in range(0, (to_date - from_date).days + 1, 1):
        date = from_date + dt.timedelta(days=i)
        if date.day == 1:
            axis_dates.append(date.strftime("%b %Y"))
            axis_positions.append(i)

    return axis_dates, axis_positions
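

# Hedged usage sketch for the helper above (not part of the original module),
# assuming matplotlib.pyplot is available as plt as in the plotting code below:
def _example_axis_labels(year='2017-18'):
    axis_dates, axis_positions = _axis_date_labels_from_year(year)
    plt.xticks(axis_positions, axis_dates)

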
def _get_all_snow(get_new=False):

    file_name = '{}observations and forecasts 2012-17.pickle'.format(
        env.local_storage)

    if get_new:
        all_observations = go.get_all_registrations('2012-12-01',
                                                    '2017-07-01',
                                                    geohazard_tids=10)

        years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17']
        all_forecasts = []
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            region_ids = gm.get_forecast_regions(y)
            all_forecasts += gfa.get_avalanche_warnings(
                region_ids, from_date, to_date)

        mp.pickle_anything([all_observations, all_forecasts], file_name)

    else:
        [all_observations, all_forecasts] = mp.unpickle_anything(file_name)

    return all_observations, all_forecasts


def get_all_forecasts(year, lang_key=1, max_file_age=23):
    """Specialized method for getting all forecasts for one season.
    For the current season (at the time of writing, 2018-19), if a request has
    been made within the last 23 hrs, data is retrieved from a locally stored
    pickle; if not, a new request is made to the regObs API. Previous seasons
    are not requested if a pickle is found in local storage.

    :param year:                [string] E.g. season '2017-18'
    :param lang_key:            [int] 1 is Norwegian, 2 is English
    :param max_file_age:        [int] hrs a file may be old before new data is retrieved

    :return valid_forecasts:    [list of AvalancheWarning]
    """

    from_date, to_date = gm.get_forecast_dates(year=year)
    file_name = '{0}all_forecasts_{1}_lk{2}.pickle'.format(
        env.local_storage, year, lang_key)
    file_date_limit = dt.datetime.now() - dt.timedelta(hours=max_file_age)

    # If we are well out of the current season (30 days), it is unlikely the data set has changed.
    current_season = gm.get_season_from_date(dt.date.today() -
                                             dt.timedelta(30))

    # Get forecast regions used in the current year
    region_ids = gm.get_forecast_regions(year, get_b_regions=True)

    get_new = True

    if os.path.exists(file_name):
        # If the file covers a season long gone, don't fetch new data.
        if year == current_season:
            file_age = dt.datetime.fromtimestamp(os.path.getmtime(file_name))
            # If the file is newer than the given time limit, don't fetch new data.
            if file_age > file_date_limit:
                # If the file is larger than a nearly empty file, don't fetch new data.
                if os.path.getsize(file_name) > 100:  # 100 bytes limit
                    get_new = False
        else:
            get_new = False

    if get_new:
        lg.info(
            "getvarsompickles.py -> get_all_forecasts: Get new {0} forecasts and pickle."
            .format(year))

        all_forecasts = gfa.get_avalanche_warnings(region_ids,
                                                   from_date,
                                                   to_date,
                                                   lang_key=lang_key)

        # Valid forecasts have a danger level. The others are empty.
        valid_forecasts = []
        for f in all_forecasts:
            if f.danger_level > 0:
                valid_forecasts.append(f)

        mp.pickle_anything(valid_forecasts, file_name)

    else:
        valid_forecasts = mp.unpickle_anything(file_name)

    return valid_forecasts
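

# Hedged usage sketch (not part of the original module): load one season of
# forecasts via the cached getter above and count forecast days per danger level.
def _example_count_danger_levels(year='2017-18'):
    import collections as coll
    forecasts = get_all_forecasts(year)
    return coll.Counter(f.danger_level for f in forecasts)
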
    # Assumption: the enclosing plotting call of this truncated fragment is plt.text.
    plt.text(
        x + 20, -270,
        '*** {0} ganger er ett snoeskred med hoeyeste index. \n'
        '    {1} som skredaktivitet og {2} med skjema for \n'
        '    enkeltskred.'.format(est_num_1, est_num_1_aval_act,
                                  est_num_1_aval))

    return


if __name__ == "__main__":

    season = '2017-18'

    ### Get all regions
    region_ids = gm.get_forecast_regions(season)
    from_date, to_date = gm.get_forecast_dates(season)
    # from_date, to_date = '2017-12-01', '2018-02-01'
    # region_ids = [3014, 3015]

    ### get and make the data set
    date_region, forecasted_dangers = step_1_make_data_set(
        region_ids, from_date, to_date)
    mp.pickle_anything([date_region, forecasted_dangers],
                       '{0}runforavalancheactivity_step_1.pickle'.format(
                           env.local_storage))

    ### Find the observation of highest value per region per date
    date_region, forecasted_dangers = mp.unpickle_anything(
        '{0}runforavalancheactivity_step_1.pickle'.format(env.local_storage))
    date_region = step_2_find_most_valued(date_region)
    # Note: the step-2 pickle file name below is an assumption.
    mp.pickle_anything([date_region, forecasted_dangers],
                       '{0}runforavalancheactivity_step_2.pickle'.format(
                           env.local_storage))


def make_forecasts_for_Thea():
    """July 2018: Make a list of avalanche forecast danger levels for the regions
    Voss, Romsdalen, Svartisen and Salten (and those before them) for
    Thea Møllerhaug Lunde (Jernbanedirektoratet).

    The Voss-Bergen line lies mostly within our Voss region.
    The Mo i Rana-Fauske line lies in Svartisen and Salten.
    The Åndalsnes-Bjorli line lies in the forecast region Romsdalen."""

    pickle_file_name = '{0}201807_avalanche_forecasts_thea.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Romsdalen. ForecastRegionTID 118 from 2012-2016 and 3023 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 118, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3023, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Thea.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')



def make_forecasts_for_Heidi():
    """July 2018: Make a list of avalanche forecasts for the regions Voss,
    Svartisen and Fauske (and those before them) for Heidi Bjordal, SVV."""

    pickle_file_name = '{0}201807_avalanche_forecasts_heidi.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 form 2012-2016 and 3031 since.
        # Get Svartisen. ForecastRegionTID 131 form 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 form 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Heidi.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')


# Module paths for dt, gd, gm and gp below are assumptions based on the
# varsomdata project layout; the original snippet imported only fe and env.
import datetime as dt

from varsomdata import getdangers as gd
from varsomdata import getmisc as gm
from varsomdata import getproblems as gp
from utilities import fencoding as fe
import setenvironment as env

__author__ = 'Ragnar Ekker'

years = ['2014-15', '2015-16', '2016-17', '2017-18']

forecast_problems = []
forecast_dangers = []
observed_dangers = []
observed_problems = []

for y in years:
    # Get forecast data. Different region ids from year to year.
    region_ids = gm.get_forecast_regions(year=y)
    from_date, to_date = gm.get_forecast_dates(y)
    forecast_problems += gp.get_forecasted_problems(region_ids,
                                                    from_date,
                                                    to_date,
                                                    lang_key=1)
    forecast_dangers += gd.get_forecasted_dangers(region_ids,
                                                  from_date,
                                                  to_date,
                                                  lang_key=1)

    # Get observed data. All older data in regObs have been mapped to new regions.
    region_ids = gm.get_forecast_regions(year='2016-17')
    from_date, to_date = gm.get_forecast_dates(y,
                                               padding=dt.timedelta(days=20))
    current_years_observed_dangers = gd.get_observed_dangers(region_ids,
                                                             from_date,
                                                             to_date,
                                                             lang_key=1)
    observed_dangers += current_years_observed_dangers


def make_avalanche_problemes_for_techel():
    """Gets forecastes and observed avalanche problems and dangers for Frank Techel.

    Takes 20-30 min to run a year.

    :return:
    """

    pickle_file_name = '{0}runavalancheproblems_techel.pickle'.format(
        env.local_storage)

    years = ['2014-15', '2015-16', '2016-17', '2017-18']
    get_new = False

    if get_new:
        forecast_problems = []
        forecast_dangers = []
        observed_dangers = []
        observed_problems = []

        for y in years:
            # Get forecast data. Different region ids from year to year.
            region_ids = gm.get_forecast_regions(year=y)
            from_date, to_date = gm.get_forecast_dates(y)
            forecast_problems += gp.get_forecasted_problems(region_ids,
                                                            from_date,
                                                            to_date,
                                                            lang_key=2)
            forecast_dangers += gd.get_forecasted_dangers(region_ids,
                                                          from_date,
                                                          to_date,
                                                          lang_key=2)

            # Get observed data. All older data in regObs have been mapped to new regions.
            region_ids = gm.get_forecast_regions(year='2016-17')
            from_date, to_date = gm.get_forecast_dates(
                y, padding=dt.timedelta(days=20))
            this_years_observed_dangers = gd.get_observed_dangers(region_ids,
                                                                  from_date,
                                                                  to_date,
                                                                  lang_key=2)
            this_years_observed_problems = gp.get_observed_problems(region_ids,
                                                                    from_date,
                                                                    to_date,
                                                                    lang_key=2)

            # Update observations with the forecast region ids and names used in the respective years
            for od in this_years_observed_dangers:
                utm33x = od.metadata['Original data'].UTMEast
                utm33y = od.metadata['Original data'].UTMNorth
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                od.region_regobs_id = region_id
                od.region_name = region_name

            for op in this_years_observed_problems:
                utm33x = op.metadata['Original data']['UtmEast']
                utm33y = op.metadata['Original data']['UtmNorth']
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                op.region_regobs_id = region_id
                op.region_name = region_name

            observed_dangers += this_years_observed_dangers
            observed_problems += this_years_observed_problems

        mp.pickle_anything([
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ], pickle_file_name)

    else:
        [
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ] = mp.unpickle_anything(pickle_file_name)

    # Run EAWS mapping on all problems
    for p in forecast_problems:
        p.map_to_eaws_problems()

    for p in observed_problems:
        p.map_to_eaws_problems()

    output_forecast_problems = '{0}Techel forecast problems.csv'.format(
        env.output_folder)
    output_forecast_dangers = '{0}Techel forecast dangers.csv'.format(
        env.output_folder)
    output_observed_problems = '{0}Techel observed problems.csv'.format(
        env.output_folder)
    output_observed_dangers = '{0}Techel observed dangers.csv'.format(
        env.output_folder)

    import collections as coll

    # Write observed dangers to file
    with open(output_observed_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in observed_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(d.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Municipal', d.municipal_name),
                ('Nick', d.nick),
                ('Competence', d.competence_level),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                ('Forecast correct', d.forecast_correct),
                # ('Table', d.data_table),
                # ('URL', d.url),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted dangers to file
    with open(output_forecast_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in forecast_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Nick', d.nick),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                # ('Table', d.data_table),
                # ('URL', d.url),
                ('Main message', ' '.join(d.main_message_en.split()))
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted problems to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in forecast_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Nick', p.nick_name),
                ('Problem order', p.order),
                ('Problem', p.problem),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                ('Probability', p.aval_probability),
                ('Distribution', p.aval_distribution),
                ('DL', p.danger_level),
                ('Danger level', p.danger_level_name),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write observed problems to file
    with open(output_observed_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in observed_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(p.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Municipal', p.municipal_name),
                ('Nick', p.nick_name),
                ('Competence', p.competence_level),
                # ('Problem order', p.order),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Catch 1', p.cause_attribute_crystal),
                ('Catch 2', p.cause_attribute_light),
                ('Catch 3', p.cause_attribute_soft),
                ('Catch 4', p.cause_attribute_thin),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                # ('Probability', p.aval_probability),
                # ('Distribution', p.aval_distribution),
                # ('RegID', p.regid),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')
def make_forecasts_for_Espen_at_sweco():
    """Hei. I forbindelse med et prosjekt i Sørreisa i Troms ønsker vi å gi råd til vår kunde om evakuering av bygg
    i skredutsatt terreng. Som en del av vår vurdering hadde det vært veldig nyttig med statistikk for varslingen,
    altså statistikk om hvor ofte de ulike faregradene er varslet. Er det mulig å få tak i slik statistikk?
    Gjerne så langt tilbake i tid som mulig. Vennlig hilsen Espen Eidsvåg"""

    pickle_file_name = '{0}forecasts_sorreisa_espen.pickle'.format(
        env.local_storage)

    get_new = True
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [110, 112]  # Senja, Bardu
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        years = ['2016-17', '2017-18', '2018-19']
        region_ids = [3012, 3013]  # Sør Troms, Indre Troms
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel for Sørreisa.Espen Eidsvåg Sweco.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')
    def set_x_from_date(self):
        current_season = gm.get_season_from_date(self.date)
        from_date, to_date = gm.get_forecast_dates(current_season)
        x = (self.date - from_date).days
        return x


def make_dl_incident_markus():
    """
    From the beginning of time:

    get all forecasts.
    and then get how many on dl 3.

    get all incidents,
    excpt elrapp, and all in back country

    all these, get all on days in regions of dl 3.
    get all with serious caracter on days and in regions on dl 3

    :return:
    """

    pickle_file_name = '{0}incident_on_dl3_for_markus.pickle'.format(
        env.local_storage)
    years = ['2012-13', '2013-14', '2014-15', '2015-16', '2016-17']
    get_new = False

    all_dangers = []
    all_incidents = []

    if get_new:
        for y in years:

            # get the forecast dates for this season
            from_date, to_date = gm.get_forecast_dates(y)

            # get incidents for this year and map to this years forecast regions
            this_year_incidents = go.get_incident(from_date,
                                                  to_date,
                                                  geohazard_tids=10)
            for i in this_year_incidents:
                utm33x = i.UTMEast
                utm33y = i.UTMNorth
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                i.region_regobs_id = region_id
                i.region_name = region_name
            all_incidents += this_year_incidents

            # get regions and the forecasts used this year
            region_ids = gm.get_forecast_regions(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        # in the end, pickle it all
        mp.pickle_anything([all_dangers, all_incidents], pickle_file_name)

    else:
        [all_dangers, all_incidents] = mp.unpickle_anything(pickle_file_name)

    all_dl3 = []
    for d in all_dangers:
        if d.danger_level == 3:
            all_dl3.append(d)

    all_back_country_incidents = []
    for i in all_incidents:
        if 'drift@svv' not in i.NickName:
            # if the activity influenced is back country or snowmobile.
            # 100 (not specified) is included so that incidents without a
            # specified activity also end up in this data set.
            if i.ActivityInfluencedTID in [
                    100, 110, 111, 112, 113, 114, 115, 116, 117, 130
            ]:
                all_back_country_incidents.append(i)

    all_back_country_incidents_with_consequence = []
    for i in all_back_country_incidents:
        # If damage extent is near-accident, people injured or people killed
        if i.DamageExtentTID > 28:
            all_back_country_incidents_with_consequence.append(i)

    # find incidents in regions on days with danger level 3
    all_back_country_incidents_on_region_dl3 = []
    all_back_country_incidents_with_consequence_on_region_dl3 = []

    for d in all_dl3:
        danger_date = d.date
        danger_region_id = d.region_regobs_id

        for i in all_back_country_incidents:
            incident_date = i.DtObsTime.date()
            incident_region_id = i.ForecastRegionTID
            if incident_date == danger_date and incident_region_id == danger_region_id:
                all_back_country_incidents_on_region_dl3.append(i)

        for i in all_back_country_incidents_with_consequence:
            incident_date = i.DtObsTime.date()
            incident_region_id = i.ForecastRegionTID
            if incident_date == danger_date and incident_region_id == danger_region_id:
                all_back_country_incidents_with_consequence_on_region_dl3.append(
                    i)

    print('Totalt varsler laget siden tidenes morgen: {}'.format(
        len(all_dangers)))
    print('Totalt varsler på fg 3: {}'.format(len(all_dl3)))
    print('Totalt antall hendelser i baklandet: {}'.format(
        len(all_back_country_incidents)))
    print('Totalt antall hendelser i baklandet med konsekvens: {}'.format(
        len(all_back_country_incidents_with_consequence)))
    print(
        'Totalt antall hendelser i baklandet i regioner på dager med fg3: {}'.
        format(len(all_back_country_incidents_on_region_dl3)))
    print(
        'Totalt antall hendelser i baklandet i regioner på dager med fg3 med konsekvens: {}'
        .format(
            len(all_back_country_incidents_with_consequence_on_region_dl3)))

    return
def _plot_causes(region_name,
                 causes,
                 year='2018-19',
                 plot_folder=env.plot_folder + 'regionplots/'):
    """Plots observed and forecasted causes for a region for a given year.

    :param region_name:
    :param year:            [string]
    :param causes:
    :param plot_folder:
    :return:
    """

    if not os.path.exists(plot_folder):
        os.makedirs(plot_folder)

    from_date, to_date = gm.get_forecast_dates(year)

    filename = '{0} skredproblemer {1}'.format(region_name, year)
    ml.log_and_print(
        "[info] plotdangerandproblem.py -> plot_causes: Plotting {0}".format(
            filename))

    aval_cause_kdv = gkdv.get_kdv('AvalCauseKDV')
    list_of_causes = [10, 15, 14, 11, 13, 18, 19, 16, 22, 20, 24, 0]
    # list_of_causes = set([c.cause_tid for c in causes])
    list_of_cause_names = [aval_cause_kdv[tid].Name for tid in list_of_causes]

    dict_of_causes = {}
    for c in list_of_causes:
        dict_of_causes[c] = []
    for c in causes:
        dict_of_causes[c.cause_tid].append(c)

    # Start plotting
    fsize = (16, 7)
    plt.figure(figsize=fsize)
    plt.clf()

    # plot lines and left and bottom ticks
    y = 0
    for k, values in dict_of_causes.items():
        for v in values:
            x = (v.date - from_date).days
            if 'Forecast' in v.source:
                plt.hlines(y - 0.1, x, x + 1, lw=4,
                           color='red')  # offset the line 0.1 up
            if 'Observation' in v.source:
                plt.hlines(y + 0.1, x, x + 1, lw=4,
                           color='blue')  # offset the line 0.1 down
        y += 1

    # Left y-axis labels
    plt.ylim(len(list_of_causes) - 1, -1)  # 12 avalanche problems
    plt.yticks(range(len(list_of_causes) + 1), list_of_cause_names)

    # x-axis labels
    axis_dates = []
    axis_positions = []
    for i in range(0, (to_date - from_date).days, 1):
        date = from_date + dt.timedelta(days=i)
        if date.day == 1:
            axis_dates.append(date.strftime("%b %Y"))
            axis_positions.append(i)
    plt.xticks(axis_positions, axis_dates)

    # Right hand side y-axis
    right_ticks = []
    correlation_sum = 0.
    for k, values in dict_of_causes.items():
        values_obs = [vo for vo in values if 'Observation' in vo.source]
        values_fc = [vf for vf in values if 'Forecast' in vf.source]
        correlation = 0.
        for obs in values_obs:
            for fc in values_fc:
                if obs.date == fc.date and obs.cause_tid == fc.cause_tid:
                    correlation += 1
        if len(values_obs) == 0 and len(values_fc) == 0:
            right_ticks.append("")
        else:
            if len(values_obs) == 0:
                right_ticks.append("v{0} o{1} s{2}%".format(
                    len(values_fc), len(values_obs), 0))
            else:
                right_ticks.append("v{0} o{1} s{2}%".format(
                    len(values_fc), len(values_obs),
                    int(correlation / len(values_obs) * 100)))
        correlation_sum += correlation
    right_ticks.reverse()
    plt.twinx()
    plt.ylim(-1, len(right_ticks) - 1)
    plt.yticks(range(len(right_ticks) + 1), right_ticks)

    # the title
    num_obs = len([c for c in causes if 'Observation' in c.source])
    num_fc = len([c for c in causes if 'Forecast' in c.source])
    if num_obs == 0:
        correlation_prct = 0
    else:
        correlation_prct = int(correlation_sum / num_obs * 100)

    title = 'Skredproblemer for {0} ({1} - {2}) \n Totalt {3} varslede problemer (rød) og {4} observerte problemer (blå) \n og det er {5}% samsvar mellom det som er observert og det som er varslet.'\
        .format(region_name, from_date.strftime('%Y%m%d'), to_date.strftime('%Y%m%d'), num_fc, num_obs, correlation_prct)
    plt.title(title)

    # When is the figure made?
    plt.gcf().text(0.85,
                   0.02,
                   'Figur laget {0:%Y-%m-%d %H:%M}'.format(dt.datetime.now()),
                   color='0.5')

    fig = plt.gcf()
    fig.subplots_adjust(left=0.2)
    plt.savefig(u'{0}{1}'.format(plot_folder, filename))
    plt.close(fig)
def _plot_danger_levels(region_name,
                        danger_levels,
                        aval_indexes,
                        year='2018-19',
                        plot_folder=env.plot_folder + 'regionplots/'):
    """Plots the danger levels as bars and makes a small cake diagram with distribution.

    :param region_name:     [String] Name of forecast region
    :param year:            [string]
    :param danger_levels:
    :param aval_indexes:
    :param plot_folder:
    :return:
    """

    if not os.path.exists(plot_folder):
        os.makedirs(plot_folder)

    from_date, to_date = gm.get_forecast_dates(year)

    filename = '{0} faregrader {1}'.format(region_name, year)
    ml.log_and_print(
        "[info] plotdangerandproblem.py -> plot_danger_levels: Plotting {0}".
        format(filename))

    # Figure dimensions
    fsize = (16, 16)
    fig = plt.figure(figsize=fsize)
    plt.clf()

    ##########################################
    # First subplot with avalanche index
    ##########################################
    pplt.subplot2grid((6, 1), (0, 0), rowspan=1)

    index_dates = []
    data_indexes = []
    index_colors = []

    for i in aval_indexes:
        date = i.date
        index_dates.append(date)
        data_indexes.append(i.index)
        # color on the marker
        if i.index == 0:
            index_colors.append('white')
        elif i.index == 1:
            index_colors.append('pink')
        elif i.index >= 2 and i.index <= 5:
            index_colors.append('green')
        elif i.index >= 6 and i.index <= 9:
            index_colors.append('yellow')
        elif i.index >= 10 and i.index <= 12:
            index_colors.append('orange')
        elif i.index >= 13:
            index_colors.append('red')
        else:
            # This option should not happen.
            index_colors.append('black')
            lg.warning(
                "plotdangerandproblem.py -> plot_danger_levels: Illegal avalanche index option."
            )

    index_values = np.asarray(data_indexes, int)

    plt.scatter(index_dates, index_values, s=50., c=index_colors, alpha=0.5)
    plt.yticks([1, 4, 6, 11, 17, 22], [
        'Ingen - 1', 'Ett str2 - 4', 'Ett str3 - 6', 'Noen str3 - 11',
        'Mange str3 - 17', ''
    ])
    plt.ylabel("Skredindex")
    plt.xlim(from_date, to_date)

    title = "Faregrad og skredindeks for {0} ({1})".format(region_name, year)
    plt.title(title)

    ##########################################
    # Second subplot with avalanche danger forecast
    ##########################################
    pplt.subplot2grid((6, 1), (1, 0), rowspan=2)

    # Making the main plot
    dl_labels = [
        '', '1 - Liten', '2 - Moderat', '3 - Betydelig', '4 - Stor', ''
    ]
    dl_colors = ['0.5', '#ccff66', '#ffff00', '#ff9900', '#ff0000', 'k']

    # Making a new dataset with both warned and evaluated data
    data_dates = []
    data_dangers = []

    for d in danger_levels:
        data_dates.append(d.date)
        if 'Forecast' in d.source:
            data_dangers.append(d.danger_level)
        else:
            data_dangers.append(0. * d.danger_level)

    values = np.asarray(data_dangers, int)

    colors = []
    for n in values:
        if abs(n) == 1:
            colors.append(dl_colors[1])
        elif abs(n) == 2:
            colors.append(dl_colors[2])
        elif abs(n) == 3:
            colors.append(dl_colors[3])
        elif abs(n) == 4:
            colors.append(dl_colors[4])
        elif abs(n) == 5:
            colors.append(dl_colors[5])
        else:
            colors.append(dl_colors[0])

    plt.bar(data_dates, values, color=colors)
    plt.yticks(range(0, len(dl_labels), 1), dl_labels)
    plt.ylabel("Varslet faregrad")
    plt.xlim(from_date, to_date)

    ##########################################
    # Third subplot with avalanche danger observed
    ##########################################
    pplt.subplot2grid((6, 1), (3, 0), rowspan=2)

    dl_labels = [
        '', '1 - Liten', '2 - Moderat', '3 - Betydelig', '4 - Stor', ''
    ]
    dl_colors = ['0.5', '#ccff66', '#ffff00', '#ff9900', '#ff0000', 'k']

    # Making a new dataset with both warned and evaluated data
    data_dates = []
    data_dangers = []

    for d in danger_levels:
        data_dates.append(d.date)
        if 'Forecast' not in d.source:
            data_dangers.append(-1. * d.danger_level)
        else:
            data_dangers.append(0. * d.danger_level)

    values = np.asarray(data_dangers, int)

    colors = []
    for n in values:
        if abs(n) == 1:
            colors.append(dl_colors[1])
        elif abs(n) == 2:
            colors.append(dl_colors[2])
        elif abs(n) == 3:
            colors.append(dl_colors[3])
        elif abs(n) == 4:
            colors.append(dl_colors[4])
        elif abs(n) == 5:
            colors.append(dl_colors[5])
        else:
            colors.append(dl_colors[0])

    plt.bar(data_dates, values, color=colors)
    plt.yticks(range(0, -len(dl_labels), -1), dl_labels)
    plt.ylabel('Observert faregrad')
    plt.xticks([])
    plt.xlim(from_date, to_date)

    ##########################################
    # Forth subplot with how well the forecast is
    ##########################################
    pplt.subplot2grid((6, 1), (5, 0), rowspan=1)
    plt.xlim(from_date, to_date)

    forecast_correct_values = []
    forecast_correct_colours = []
    forecast_correct_dates = []
    for d in danger_levels:
        if 'Observation' in d.source:
            forecast_correct = d.danger_object.forecast_correct
            if forecast_correct is not None and 'Ikke gitt' not in forecast_correct:
                forecast_correct_dates.append(d.date)
                if 'riktig' in forecast_correct:
                    forecast_correct_values.append(0)
                    forecast_correct_colours.append('green')
                elif 'for lav' in forecast_correct:
                    forecast_correct_values.append(-1)
                    forecast_correct_colours.append('red')
                elif 'for høy' in forecast_correct:
                    forecast_correct_values.append(1)
                    forecast_correct_colours.append('red')
                else:
                    forecast_correct_values.append(0)
                    forecast_correct_colours.append('black')
                    lg.warning(
                        "plotdangerandproblem.py -> plot_danger_levels: Illegal option for markes on forecast correct plot."
                    )

    forecast_correct_np_values = np.asarray(forecast_correct_values, int)
    plt.scatter(forecast_correct_dates,
                forecast_correct_np_values,
                s=50.,
                c=forecast_correct_colours,
                alpha=0.5)
    plt.yticks(range(-1, 2, 1), ["For lav", "Riktig", "    For høy"])
    plt.ylabel("Stemmer varslet faregrad?")

    # this is an inset pie of the distribution of danger levels OVER the main axes
    xfrac = 0.15
    yfrac = (float(fsize[0]) / float(fsize[1])) * xfrac
    xpos = 0.45 - xfrac
    ypos = 0.95 - yfrac
    a = plt.axes([0.8, 0.66, 0.10, 0.10])
    # a = plt.axes([xpos, ypos, xfrac, yfrac])
    wDistr = np.bincount(
        [d.danger_level for d in danger_levels if 'Forecast' in d.source])
    a.pie(wDistr, colors=dl_colors, autopct='%1.0f%%', shadow=False)
    plt.setp(a, xticks=[], yticks=[])

    # this is an inset pie of the distribution of dangerlevels UNDER the main axes
    xfrac = 0.15
    yfrac = (float(fsize[0]) / float(fsize[1])) * xfrac
    xpos = 0.95 - xfrac
    ypos = 0.29 - yfrac
    b = plt.axes([0.8, 0.24, 0.10, 0.10])
    # b = plt.axes([xpos, ypos, xfrac, yfrac])
    eDistr = np.bincount(
        [d.danger_level for d in danger_levels if 'Observation' in d.source])
    b.pie(eDistr, colors=dl_colors, autopct='%1.0f%%', shadow=False)
    plt.setp(b, xticks=[], yticks=[])

    # figure text in observed danger levels subplot
    w_number, e_number, fract_same = _compare_danger_levels(danger_levels)
    fig.text(
        0.15,
        0.25,
        " Totalt {0} varslet faregrader og {1} observerte faregrader \n og det er {2}% samsvar mellom det som er observert og varslet."
        .format(w_number, e_number, int(round(fract_same * 100, 0))),
        fontsize=14)

    # fractions to the right in the forecast correct subplot
    forecast_correct_distr = {}
    for f in forecast_correct_values:
        if f in forecast_correct_distr.keys():
            forecast_correct_distr[f] += 1
        else:
            forecast_correct_distr[f] = 1

    if 1 in forecast_correct_distr.keys():
        fig.text(0.91,
                 0.19,
                 '{0}%'.format(
                     int(
                         round(
                             forecast_correct_distr[1] /
                             float(len(forecast_correct_values)) * 100, 0))),
                 fontsize=14)
    if 0 in forecast_correct_distr.keys():
        fig.text(0.91,
                 0.15,
                 '{0}%'.format(
                     int(
                         round(
                             forecast_correct_distr[0] /
                             float(len(forecast_correct_values)) * 100, 0))),
                 fontsize=14)
    if -1 in forecast_correct_distr.keys():
        fig.text(0.91,
                 0.11,
                 '{0}%'.format(
                     int(
                         round(
                             forecast_correct_distr[-1] /
                             float(len(forecast_correct_values)) * 100, 0))),
                 fontsize=14)

    # When is the figure made?
    plt.gcf().text(0.8,
                   0.02,
                   'Figur laget {0:%Y-%m-%d %H:%M}'.format(dt.datetime.now()),
                   color='0.5')

    # This saves the figure to file
    plt.savefig(u'{0}{1}'.format(plot_folder, filename))  #,dpi=90)
    plt.close(fig)
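

# Hedged refactoring sketch (not part of the original module): the danger-value
# to color mapping appears twice in _plot_danger_levels above; one small helper
# could serve both loops.
def _colors_for_danger_values(values, dl_colors):
    colors = []
    for n in values:
        level = abs(int(n))
        if 1 <= level <= 5:
            colors.append(dl_colors[level])
        else:
            colors.append(dl_colors[0])
    return colors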