def make_forecasts_for_Christian():
    """Christian Jaedicke ønsker oversikt over varsel og skredproblemer siste tre år i Narvik."""

    pickle_file_name = '{0}forecasts_ofoten_christian.pickle'.format(
        env.local_storage)

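    # get_new toggles between fetching fresh data (and caching it as a pickle)
    # and re-reading the cached pickle from local storage.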
    get_new = False
    all_dangers = []

    if get_new:
        # Get Narvik 2014-15 and 2015-16
        region_id = 114

        from_date, to_date = gm.get_forecast_dates('2014-15')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        from_date, to_date = gm.get_forecast_dates('2015-16')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        # Get Ofoten (the successor region covering Narvik) 2016-17
        region_id = 3015
        from_date, to_date = gm.get_forecast_dates('2016-17')
        all_dangers += gd.get_forecasted_dangers(region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel Ofoten for Christian.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')
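
# Every export below repeats the same ' ;'-separated write loop: one header
# row, then one row per OrderedDict. A helper along these lines could factor
# it out (a sketch only; _write_rows is not part of the original module):
def _write_rows(file_path, rows, sep=' ;'):
    """Write an iterable of OrderedDicts to file_path: header row first, then one line per dict."""
    with open(file_path, 'w', encoding='utf-8') as f:
        for i, row in enumerate(rows):
            if i == 0:
                f.write(sep.join([fe.make_str(k) for k in row.keys()]) + '\n')
            f.write(sep.join([fe.make_str(v) for v in row.values()]) + '\n')
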
def make_forecasts_for_Thea():
    """July 2018: Make list of avalanche forecasts danger levels for regions Voss, Romsdalen, Svartisen
    and Salten (and those before them) for Thea Møllerhaug Lunde (Jernbanedirektoratet).

    Voss-Bergen ligger i for det meste i Voss-regionen vår.
    Mo i Rana-Fauske ligger i Svartisen og Salten.
    Åndalsnes-Bjorli ligger i varslingsregionen Romsdalen."""

    pickle_file_name = '{0}201807_avalanche_forecasts_thea.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Romsdalen. ForecastRegionTID 118 from 2012-2016 and 3023 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

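        # The pre-2016 region ids are used for seasons 2012-13 through 2015-16;
        # the 3xxx ids from the 2016 region revision apply from 2016-17 on.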
        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 118, 131, 133]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3023, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Thea.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

def make_forecasts_for_Heidi():
    """July 2018: Make list of avalanche forecasts for regions Voss, Svartisen og Fauske (and those before them)
    for Heidi Bjordal SVV"""

    pickle_file_name = '{0}201807_avalanche_forecasts_heidi.pickle'.format(
        env.local_storage)

    get_new = False
    all_dangers = []

    if get_new:
        # Get Voss. ForecastRegionTID 124 from 2012-2016 and 3031 since.
        # Get Svartisen. ForecastRegionTID 131 from 2012-2016 and 3017 since.
        # Get Salten. ForecastRegionTID 133 from 2012-2016 and 3016 since.

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [124, 131, 133]
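        # Salten (133, later 3016) covers the Fauske area named in the request.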

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        years = ['2016-17', '2017-18']
        region_ids = [3031, 3017, 3016]

        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            all_dangers += gd.get_forecasted_dangers(region_ids, from_date,
                                                     to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}201807 Snøskredvarsel for Heidi.txt'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')

def make_avalanche_problemes_for_techel():
    """Gets forecastes and observed avalanche problems and dangers for Frank Techel.

    Takes 20-30 min to run a year.

    :return:
    """

    pickle_file_name = '{0}runavalancheproblems_techel.pickle'.format(
        env.local_storage)

    years = ['2014-15', '2015-16', '2016-17', '2017-18']
    get_new = False

    if get_new:
        forecast_problems = []
        forecast_dangers = []
        observed_dangers = []
        observed_problems = []

        for y in years:
            # Get forecast data. Different region ids from year to year.
            region_ids = gm.get_forecast_regions(year=y)
            from_date, to_date = gm.get_forecast_dates(y)
            forecast_problems += gp.get_forecasted_problems(region_ids,
                                                            from_date,
                                                            to_date,
                                                            lang_key=2)
            forecast_dangers += gd.get_forecasted_dangers(region_ids,
                                                          from_date,
                                                          to_date,
                                                          lang_key=2)

            # Get observed data. All older data in regObs have been mapped to new regions.
            region_ids = gm.get_forecast_regions(year='2016-17')
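            # The forecast dates are padded by 20 days on each side so that
            # observations made just outside the forecast season are included.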
            from_date, to_date = gm.get_forecast_dates(
                y, padding=dt.timedelta(days=20))
            this_years_observed_dangers = gd.get_observed_dangers(region_ids,
                                                                  from_date,
                                                                  to_date,
                                                                  lang_key=2)
            this_years_observed_problems = gp.get_observed_problems(region_ids,
                                                                    from_date,
                                                                    to_date,
                                                                    lang_key=2)

            # Update observations with the forecast region ids and names used in the respective years
            for od in this_years_observed_dangers:
                utm33x = od.metadata['Original data'].UTMEast
                utm33y = od.metadata['Original data'].UTMNorth
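                # Find the forecast region, as defined in season y, that
                # contains the observation coordinate.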
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                od.region_regobs_id = region_id
                od.region_name = region_name

            for op in this_years_observed_problems:
                utm33x = op.metadata['Original data']['UtmEast']
                utm33y = op.metadata['Original data']['UtmNorth']
                region_id, region_name = gm.get_forecast_region_for_coordinate(
                    utm33x, utm33y, y)
                op.region_regobs_id = region_id
                op.region_name = region_name

            observed_dangers += this_years_observed_dangers
            observed_problems += this_years_observed_problems

        mp.pickle_anything([
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ], pickle_file_name)

    else:
        [
            forecast_problems, forecast_dangers, observed_dangers,
            observed_problems
        ] = mp.unpickle_anything(pickle_file_name)

    # Run EAWS mapping on all problems
    for p in forecast_problems:
        p.map_to_eaws_problems()

    for p in observed_problems:
        p.map_to_eaws_problems()

    output_forecast_problems = '{0}Techel forecast problems.csv'.format(
        env.output_folder)
    output_forecast_dangers = '{0}Techel forecast dangers.csv'.format(
        env.output_folder)
    output_observed_problems = '{0}Techel observed problems.csv'.format(
        env.output_folder)
    output_observed_dangers = '{0}Techel observed dangers.csv'.format(
        env.output_folder)

    import collections as coll

    # Write observed dangers to file
    with open(output_observed_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in observed_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(d.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Municipal', d.municipal_name),
                ('Nick', d.nick),
                ('Competence', d.competence_level),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                ('Forecast correct', d.forecast_correct),
                # ('Table', d.data_table),
                # ('URL', d.url),
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted dangers to file
    with open(output_forecast_dangers, 'w', encoding='utf-8') as f:
        make_header = True
        for d in forecast_dangers:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(d.date, '%Y-%m-%d')),
                ('Region id', d.region_regobs_id),
                ('Region', d.region_name),
                ('Nick', d.nick),
                ('DL', d.danger_level),
                ('Danger level', d.danger_level_name),
                # ('Table', d.data_table),
                # ('URL', d.url),
                ('Main message', ' '.join(d.main_message_en.split()))
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write forecasted problems to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in forecast_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Nick', p.nick_name),
                ('Problem order', p.order),
                ('Problem', p.problem),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                ('Probability', p.aval_probability),
                ('Distribution', p.aval_distribution),
                ('DL', p.danger_level),
                ('Danger level', p.danger_level_name),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

    # Write observed problems to file
    with open(output_observed_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for p in observed_problems:
            out_data = coll.OrderedDict([
                ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                ('Reg time',
                 dt.datetime.strftime(p.registration_time, '%Y-%m-%d %H:%M')),
                ('Region id', p.region_regobs_id),
                ('Region', p.region_name),
                ('Municipal', p.municipal_name),
                ('Nick', p.nick_name),
                ('Competence', p.competence_level),
                # ('Problem order', p.order),
                ('EAWS problem', p.eaws_problem),
                ('Cause/ weaklayer', p.cause_name),
                # ('TypeTID', p.aval_type_tid),
                ('Type', p.aval_type),
                ('Catch 1', p.cause_attribute_crystal),
                ('Catch 2', p.cause_attribute_light),
                ('Catch 3', p.cause_attribute_soft),
                ('Catch 4', p.cause_attribute_thin),
                ('Size', p.aval_size),
                ('Trigger', p.aval_trigger),
                # ('Probability', p.aval_probability),
                # ('Distribution', p.aval_distribution),
                # ('RegID', p.regid),
                # ('Table', p.regobs_table),
                # ('URL', p.url)
            ])
            if make_header:
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d)
                               for d in out_data.values()]) + '\n')

def make_forecasts_at_incidents_for_sander():
    """Lager csv med alle varsomhendelser sammen med faregrad og de aktuelle skredproblemene 
    (svakt lag, skredtype og skredproblemnavnert). Der det er gjort en regObs observasjon 
    med «hendelse/ulykke» skjema fylt ut har jeg også lagt på skadeomfangsvurderingen.

    August 2018: Hei Jostein.

    Som du veit skal eg skriva om: Skredulykker knytt til skredproblem
    Du snakka om at det var muleg å få ut data for dette frå NVE sin database. Kan du hjelpa meg med det?

    Mvh
    Sander
    """

    pickle_file_name = '{0}dl_inci_sander.pickle'.format(env.local_storage)
    output_incident_and_dl = '{0}Hendelse og faregrad til Sander.csv'.format(
        env.output_folder)
    get_new = False

    if get_new:
        varsom_incidents = gm.get_varsom_incidents(add_forecast_regions=True,
                                                   add_forecasts=True,
                                                   add_observations=True)
        mp.pickle_anything(varsom_incidents, pickle_file_name)
    else:
        varsom_incidents = mp.unpickle_anything(pickle_file_name)

    incident_and_dl = []

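    # Flatten each incident into one row: incident metadata, the forecasted
    # danger level, and up to three ranked avalanche problems.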
    for i in varsom_incidents:

        incident_date = i.date
        danger_level = None

        problem_1 = None
        problem_2 = None
        problem_3 = None

        avalanche_type_1 = None
        avalanche_type_2 = None
        avalanche_type_3 = None

        weak_layer_1 = None
        weak_layer_2 = None
        weak_layer_3 = None

        dato_regobs = None
        damage_extent = None

        if i.forecast:
            danger_level = i.forecast.danger_level
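            # A forecast lists up to three avalanche problems ranked by
            # p.order; unpack each into its own set of columns.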
            for p in i.forecast.avalanche_problems:
                if p.order == 1:
                    problem_1 = p.problem
                    weak_layer_1 = p.cause_name
                    avalanche_type_1 = p.aval_type
                if p.order == 2:
                    problem_2 = p.problem
                    weak_layer_2 = p.cause_name
                    avalanche_type_2 = p.aval_type
                if p.order == 3:
                    problem_3 = p.problem
                    weak_layer_3 = p.cause_name
                    avalanche_type_3 = p.aval_type

            if i.observations:
                dato_regobs = i.observations[0].DtObsTime.date()
                for obs in i.observations:
                    for o in obs.Observations:
                        if isinstance(o, go.Incident):
                            damage_extent = o.DamageExtentName

        incident_and_dl.append({
            'Dato': incident_date,
            # 'Dato (regObs)': dato_regobs,
            'Region': i.region_name,
            'Kommune': i.municipality,
            'Dødsfall': i.fatalities,
            'Alvorsgrad': damage_extent,
            'Involverte': i.people_involved,
            'Aktivitet': i.activity,
            'Faregrad': danger_level,
            'Skredproblem 1': problem_1,
            'Skredtype 1': avalanche_type_1,
            'Svaktlag 1': weak_layer_1,
            'Skredproblem 2': problem_2,
            'Skredtype 2': avalanche_type_2,
            'Svaktlag 2': weak_layer_2,
            'Skredproblem 3': problem_3,
            'Skredtype 3': avalanche_type_3,
            'Svaktlag 3': weak_layer_3,
            'Kommentar': i.comment,
            'regObs': '{}'.format(i.regid)
        })

    # Write incidents and danger levels to file
    with open(output_incident_and_dl, 'w', encoding='utf-8') as f:
        make_header = True
        for i in incident_and_dl:
            if make_header:
                f.write(' ;'.join([fe.make_str(d) for d in i.keys()]) + '\n')
                make_header = False
            f.write(' ;'.join([fe.make_str(d) for d in i.values()]).replace(
                '[', '').replace(']', '') + '\n')

def make_forecasts_for_Espen_at_sweco():
    """Hei. I forbindelse med et prosjekt i Sørreisa i Troms ønsker vi å gi råd til vår kunde om evakuering av bygg
    i skredutsatt terreng. Som en del av vår vurdering hadde det vært veldig nyttig med statistikk for varslingen,
    altså statistikk om hvor ofte de ulike faregradene er varslet. Er det mulig å få tak i slik statistikk?
    Gjerne så langt tilbake i tid som mulig. Vennlig hilsen Espen Eidsvåg"""

    pickle_file_name = '{0}forecasts_sorreisa_espen.pickle'.format(
        env.local_storage)

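    # Note: get_new defaults to True here, so each run fetches fresh data and
    # refreshes the cached pickle (the other exports read the cache).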
    get_new = True
    all_dangers = []

    if get_new:

        years = ['2012-13', '2013-14', '2014-15', '2015-16']
        region_ids = [110, 112]  # Senja, Bardu
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        years = ['2016-17', '2017-18', '2018-19']
        region_ids = [3012, 3013]  # Sør Troms, Indre Troms
        for y in years:
            from_date, to_date = gm.get_forecast_dates(y)
            for region_id in region_ids:
                all_dangers += gd.get_forecasted_dangers(
                    region_id, from_date, to_date)

        mp.pickle_anything(all_dangers, pickle_file_name)

    else:
        all_dangers = mp.unpickle_anything(pickle_file_name)

    output_forecast_problems = '{0}Varsel for Sørreisa.Espen Eidsvåg Sweco.csv'.format(
        env.output_folder)

    import collections as coll

    # Write forecasts to file
    with open(output_forecast_problems, 'w', encoding='utf-8') as f:
        make_header = True
        for d in all_dangers:
            for p in d.avalanche_problems:
                out_data = coll.OrderedDict([
                    ('Date', dt.date.strftime(p.date, '%Y-%m-%d')),
                    ('Region id', p.region_regobs_id),
                    ('Region', p.region_name), ('DL', p.danger_level),
                    ('Danger level', p.danger_level_name),
                    ('Problem order', p.order), ('Problem', p.problem),
                    ('Cause/ weaklayer', p.cause_name), ('Type', p.aval_type),
                    ('Size', p.aval_size), ('Trigger', p.aval_trigger),
                    ('Probability', p.aval_probability),
                    ('Distribution', p.aval_distribution)
                ])
                if make_header:
                    f.write(
                        ' ;'.join([fe.make_str(d)
                                   for d in out_data.keys()]) + '\n')
                    make_header = False
                f.write(' ;'.join([fe.make_str(d)
                                   for d in out_data.values()]) + '\n')

def make_forecasts_at_incidents_for_mala(get_new=False):
    """Lager csv med alle varsomhendelser sammen med faregrad og de aktuelle skredproblemene
    (svakt lag, skredtype og skredproblemnavnert). Der det er gjort en regObs observasjon
    med «hendelse/ulykke» skjema fylt ut har jeg også lagt på skadeomfangsvurderingen.
    """

    pickle_file_name = '{0}dl_inci_mala.pickle'.format(env.local_storage)
    output_incident_and_dl = '{0}incidents_mala.csv'.format(env.output_folder)

    if get_new:
        varsom_incidents = gm.get_varsom_incidents(add_forecast_regions=True,
                                                   add_forecasts=True,
                                                   add_observations=False)
        mp.pickle_anything(varsom_incidents, pickle_file_name)
    else:
        varsom_incidents = mp.unpickle_anything(pickle_file_name)

    incident_and_dl = []

    for i in varsom_incidents:

        incident_date = i.date
        danger_level = None

        problem_1 = None
        problem_2 = None
        problem_3 = None

        avalanche_type_1 = None
        avalanche_type_2 = None
        avalanche_type_3 = None

        weak_layer_1 = None
        weak_layer_2 = None
        weak_layer_3 = None

        dato_regobs = None
        damage_extent = None

        if i.forecast:
            danger_level = i.forecast.danger_level
            for p in i.forecast.avalanche_problems:
                if p.order == 1:
                    problem_1 = p.problem
                    weak_layer_1 = p.cause_name
                    avalanche_type_1 = p.aval_type
                if p.order == 2:
                    problem_2 = p.problem
                    weak_layer_2 = p.cause_name
                    avalanche_type_2 = p.aval_type
                if p.order == 3:
                    problem_3 = p.problem
                    weak_layer_3 = p.cause_name
                    avalanche_type_3 = p.aval_type

            if i.observations:
                dato_regobs = i.observations[0].DtObsTime.date()
                for obs in i.observations:
                    for o in obs.Observations:
                        if isinstance(o, go.Incident):
                            damage_extent = o.DamageExtentName

        incident_and_dl.append({
            'Date': incident_date,
            # 'Dato (regObs)': dato_regobs,
            'Region_id': i.region_id,
            'Region': i.region_name,
            'Fatalities': i.fatalities,
            'Damage_extent': damage_extent,
            'People_involved': i.people_involved,
            'Activity': i.activity,
            'Danger_level': danger_level,
            'Avalanche_problem_1': problem_1,
            'Avalanche_type_1': avalanche_type_1,
            'Weak_layer_1': weak_layer_1,
            'Avalanche_problem_2': problem_2,
            'Avalanche_type_2': avalanche_type_2,
            'Weak_layer_2': weak_layer_2,
            'Avalanche_problem_3': problem_3,
            'Avalanche_type_3': avalanche_type_3,
            'Weak_layer_3': weak_layer_3,
            'Comment': i.comment,
            'regObs_id': '{}'.format(i.regid)
        })

    # Write incidents and danger levels to file
    with open(output_incident_and_dl, 'w', encoding='utf-8') as f:
        make_header = True
        for i in incident_and_dl:
            if make_header:
                f.write(';'.join([fe.make_str(d) for d in i.keys()]) + '\n')
                make_header = False
            f.write(';'.join([fe.make_str(d) for d in i.values()]).replace(
                '[', '').replace(']', '') + '\n')
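
# Minimal usage sketch (assumes env.local_storage and env.output_folder point
# at writable folders; get_new=True fetches fresh data and refreshes the cache):
if __name__ == '__main__':
    make_forecasts_at_incidents_for_mala(get_new=True)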